node-llama-cpp
Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level.
JavaScript
import { fileURLToPath } from "url";
import path from "path";
import yargs from "yargs";
import { hideBin } from "yargs/helpers";
import fs from "fs-extra";
import { cliBinName, documentationPageUrls } from "../config.js";
import { setIsRunningFromCLI } from "../state.js";
import { withCliCommandDescriptionDocsUrl } from "./utils/withCliCommandDescriptionDocsUrl.js";
import { PullCommand } from "./commands/PullCommand.js";
import { ChatCommand } from "./commands/ChatCommand.js";
import { InitCommand } from "./commands/InitCommand.js";
import { SourceCommand } from "./commands/source/SourceCommand.js";
import { CompleteCommand } from "./commands/CompleteCommand.js";
import { InfillCommand } from "./commands/InfillCommand.js";
import { InspectCommand } from "./commands/inspect/InspectCommand.js";
import { OnPostInstallCommand } from "./commands/OnPostInstallCommand.js";
import { DebugCommand } from "./commands/DebugCommand.js";
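// Reconstruct __dirname, which does not exist in ES modules, to locate the package root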
const __dirname = path.dirname(fileURLToPath(import.meta.url));
const packageJson = fs.readJSONSync(path.join(__dirname, "..", "..", "package.json"));
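// Mark the package as running from the CLI so library code can adjust its behavior accordingly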
setIsRunningFromCLI(true);
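// Register every CLI command and configure argument parsing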
const yarg = yargs(hideBin(process.argv));
yarg
.scriptName(cliBinName)
.usage(withCliCommandDescriptionDocsUrl("Usage: $0 <command> [options]", documentationPageUrls.CLI.index))
.command(PullCommand)
.command(ChatCommand)
.command(InitCommand)
.command(SourceCommand)
.command(CompleteCommand)
.command(InfillCommand)
.command(InspectCommand)
.command(OnPostInstallCommand)
.command(DebugCommand)
.recommendCommands()
.demandCommand(1)
.strict()
.strictCommands()
.alias("v", "version")
.help("h")
.alias("h", "help")
.version(packageJson.version)
.wrap(Math.min(130, yarg.terminalWidth()))
.parse();
//# sourceMappingURL=cli.js.map
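For context on the schema-enforcement feature mentioned in the description, here is a minimal sketch of how it can be used programmatically, assuming the library's v3 API (getLlama, LlamaChatSession, and createGrammarForJsonSchema); the model path and the schema are illustrative placeholders:

JavaScript
import { getLlama, LlamaChatSession } from "node-llama-cpp";

const llama = await getLlama();
// Placeholder path; substitute any local GGUF model file
const model = await llama.loadModel({ modelPath: "path/to/model.gguf" });
const context = await model.createContext();
const session = new LlamaChatSession({ contextSequence: context.getSequence() });

// The grammar constrains token sampling itself, so the output always conforms to the schema
const grammar = await llama.createGrammarForJsonSchema({
    type: "object",
    properties: {
        answer: { type: "string" },
        confidenceFromOneToTen: { enum: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] }
    }
});

const response = await session.prompt("What is the capital of France?", { grammar });
const parsed = grammar.parse(response); // a plain object matching the schema
console.log(parsed.answer, parsed.confidenceFromOneToTen);

Because the schema is enforced during sampling rather than validated afterwards, no retry loop is needed: the model cannot emit tokens that would break the requested structure.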