node-llama-cpp
Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model's output at the generation level.
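The schema enforcement mentioned above looks roughly like this when used from code. This is a minimal sketch, assuming the node-llama-cpp v3 API (getLlama, LlamaChatSession, createGrammarForJsonSchema); the model path and the example schema are hypothetical.

import {getLlama, LlamaChatSession} from "node-llama-cpp";

const llama = await getLlama();
// Hypothetical path to a local GGUF model file.
const model = await llama.loadModel({modelPath: "./models/model.gguf"});
const context = await model.createContext();
const session = new LlamaChatSession({contextSequence: context.getSequence()});

// Generation is constrained so the output always conforms to this JSON schema.
const grammar = await llama.createGrammarForJsonSchema({
    type: "object",
    properties: {
        name: {type: "string"},
        yearsOfExperience: {type: "number"}
    }
});

const res = await session.prompt("Alice has 7 years of experience.", {grammar});
const parsed = grammar.parse(res); // plain object matching the schema
console.log(parsed);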
commands.js (JavaScript)
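// Re-exports the llama.cpp source download/build/clear CLI commands and getBuildDefaults;
// _startCreateCli is exported for internal use only (see the @internal tag below).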
import { BuildLlamaCppCommand } from "./cli/commands/source/commands/BuildCommand.js";
import { DownloadLlamaCppCommand } from "./cli/commands/source/commands/DownloadCommand.js";
import { ClearLlamaCppBuildCommand } from "./cli/commands/source/commands/ClearCommand.js";
import { _startCreateCli } from "./cli/startCreateCli.js";
import { getBuildDefaults } from "./utils/getBuildDefaults.js";
export { BuildLlamaCppCommand, DownloadLlamaCppCommand, ClearLlamaCppBuildCommand, getBuildDefaults };
/** @internal */
export { _startCreateCli };
//# sourceMappingURL=commands.js.map
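The re-exported commands can presumably be invoked programmatically as well; here is a hedged sketch, where the "node-llama-cpp/commands" entry point and the empty options object are assumptions, not something this file confirms.

import {DownloadLlamaCppCommand, getBuildDefaults} from "node-llama-cpp/commands"; // entry point assumed

// Inspect the default llama.cpp repo/release the package builds against.
const defaults = await getBuildDefaults();
console.log(defaults);

// Fetch the pinned llama.cpp source; passing no options assumes they all have defaults.
await DownloadLlamaCppCommand({});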