node-llama-cpp
Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level.
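As a sketch of that JSON-schema enforcement, the following uses node-llama-cpp's v3-style API; the model path and schema are illustrative assumptions, and the exact call names should be checked against the version you install:

import { getLlama, LlamaChatSession } from "node-llama-cpp";

// Assumed model path; replace with a local GGUF file you have downloaded.
const llama = await getLlama();
const model = await llama.loadModel({ modelPath: "./models/model.gguf" });
const context = await model.createContext();
const session = new LlamaChatSession({ contextSequence: context.getSequence() });

// The grammar constrains token generation itself, so the output conforms
// to the schema by construction rather than being validated after the fact.
const grammar = await llama.createGrammarForJsonSchema({
    type: "object",
    properties: {
        answer: { type: "string" }
    }
});

const response = await session.prompt("Reply with a short answer.", { grammar });
const parsed = grammar.parse(response); // typed object matching the schema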
JavaScript
import yargs from "yargs";
import { hideBin } from "yargs/helpers";
import { setIsRunningFromCLI } from "../state.js";
import { CreateCliCommand } from "./commands/InitCommand.js";
/** @internal */
export function _startCreateCli({ cliBinName, packageVersion, _enable }) {
    // Bail out unless invoked internally with the shared symbol, so that
    // merely importing this module never launches the CLI.
    if (_enable !== Symbol.for("internal"))
        return;
    // Flag the process as running from the CLI for the rest of the package.
    setIsRunningFromCLI(true);
    const yarg = yargs(hideBin(process.argv));
    yarg
        .scriptName(cliBinName)
        .usage("Usage: $0 [options]")
        .command(CreateCliCommand)
        .demandCommand(1)
        .strict()
        .strictCommands()
        .alias("v", "version")
        .help("h")
        .alias("h", "help")
        .version(packageVersion)
        // Cap help output width at 100 columns (or the terminal width if narrower).
        .wrap(Math.min(100, yarg.terminalWidth()))
        .parse();
}
//# sourceMappingURL=startCreateCli.js.map
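For context, a bin script might bootstrap this entry point roughly as follows; the import path, bin name, and version are hypothetical, since only the guard contract (passing Symbol.for("internal")) is visible in the file above:

#!/usr/bin/env node
// Hypothetical bin script (the path and names below are illustrative,
// not the package's actual layout). Symbol.for("internal") returns the
// same registered symbol in every module, so the guard above passes.
import { _startCreateCli } from "./cli/startCreateCli.js";

_startCreateCli({
    cliBinName: "create-node-llama-cpp", // assumed bin name
    packageVersion: "0.0.0",             // assumed version string
    _enable: Symbol.for("internal")
});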