node-llama-cpp
Version:
Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level.
19 lines • 827 B
JavaScript
import { withCliCommandDescriptionDocsUrl } from "../../utils/withCliCommandDescriptionDocsUrl.js";
import { documentationPageUrls } from "../../../config.js";
import { DownloadCommand } from "./commands/DownloadCommand.js";
import { BuildCommand } from "./commands/BuildCommand.js";
import { ClearCommand } from "./commands/ClearCommand.js";
/**
 * yargs command module for `source <command>`.
 *
 * Acts purely as a namespace: it registers the `download`, `build`, and
 * `clear` subcommands and performs no work of its own.
 */
export const SourceCommand = {
    command: "source <command>",
    describe: withCliCommandDescriptionDocsUrl(
        "Manage `llama.cpp` source code",
        documentationPageUrls.CLI.Source.index
    ),
    builder(yargs) {
        // Register each subcommand on the parser in turn.
        const subcommands = [DownloadCommand, BuildCommand, ClearCommand];
        let parser = yargs;
        for (const subcommand of subcommands)
            parser = parser.command(subcommand);
        return parser;
    },
    async handler() {
        // Intentionally empty: yargs requires a handler on a parent command,
        // but all actual work is delegated to the subcommands above.
    }
};
//# sourceMappingURL=SourceCommand.js.map