node-llama-cpp

Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level.
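
For instance, the JSON schema enforcement works by turning a schema into a grammar that constrains token generation. Below is a minimal sketch based on the library's documented v3 API (getLlama, createGrammarForJsonSchema, LlamaChatSession); the model path, schema, and prompt are placeholders, and exact names should be verified against the current node-llama-cpp documentation.

import path from "path";
import { fileURLToPath } from "url";
import { getLlama, LlamaChatSession } from "node-llama-cpp";

const __dirname = path.dirname(fileURLToPath(import.meta.url));

// Load a local GGUF model (the path is a placeholder).
const llama = await getLlama();
const model = await llama.loadModel({
    modelPath: path.join(__dirname, "models", "example-model.gguf")
});
const context = await model.createContext();
const session = new LlamaChatSession({ contextSequence: context.getSequence() });

// Build a grammar from a JSON schema so the model's output is constrained
// to JSON of this shape while tokens are being generated.
const grammar = await llama.createGrammarForJsonSchema({
    type: "object",
    properties: {
        answer: { type: "string" },
        confidence: { type: "number" }
    }
});

const response = await session.prompt("Summarize llama.cpp in one sentence.", { grammar });
console.log(grammar.parse(response));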

// Type declarations for the project-scaffolding CLI commands
// (the `InitCommand` and `CreateCliCommand` yargs command modules).
import { CommandModule } from "yargs";
import { BuildGpu } from "../../bindings/types.js";

// Options shared by both commands: project name, project template,
// model to use, and the GPU backend to build for ("auto" to auto-detect).
type InitCommand = {
    name?: string;
    template?: string;
    model?: string;
    gpu?: BuildGpu | "auto";
};

export declare const InitCommand: CommandModule<object, InitCommand>;
export declare const CreateCliCommand: CommandModule<object, InitCommand>;
export declare function InitCommandHandler({ name, template, model, gpu }: InitCommand): Promise<void>;
export {};
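
These exports are standard yargs CommandModule objects, so a yargs-based CLI can register them directly. The sketch below only illustrates that general pattern; the relative import path is hypothetical, and this is not the package's actual CLI wiring.

import yargs from "yargs";
import { hideBin } from "yargs/helpers";
// Hypothetical relative import path for the declarations above.
import { InitCommand, CreateCliCommand } from "./InitCommand.js";

// Register the command modules; yargs reads each module's
// `command`, `describe`, `builder`, and `handler` fields.
await yargs(hideBin(process.argv))
    .command(InitCommand)
    .command(CreateCliCommand)
    .demandCommand(1)
    .parseAsync();

// Alternatively, the handler can be invoked programmatically with the declared options:
// await InitCommandHandler({ name: "my-project", gpu: "auto" });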