UNPKG

node-llama-cpp

Version:

Run AI models locally on your machine with Node.js bindings for llama.cpp, and enforce a JSON schema on the model output at the generation level.

38 lines (37 loc) 1.03 kB
import { CommandModule } from "yargs";
import { BuildGpu, LlamaNuma } from "../../bindings/types.js";

/**
 * Parsed CLI options for the `complete` command, used as the yargs
 * argument type of the exported {@link CompleteCommand} command module.
 *
 * NOTE(review): field semantics are inferred from names only — the
 * implementation lives elsewhere; confirm against the command handler.
 */
type CompleteCommand = {
    /** Path to the model file to load (presumably a GGUF file — TODO confirm). */
    modelPath?: string;
    /** Extra headers, e.g. for remote model downloads — verify against the handler. */
    header?: string[];
    /** GPU backend to build/use, or `"auto"` to let the library pick. */
    gpu?: BuildGpu | "auto";
    systemInfo: boolean;
    /** Text prompt to complete. */
    text?: string;
    /** Path of a file to read the prompt text from. */
    textFile?: string;
    contextSize?: number;
    batchSize?: number;
    flashAttention?: boolean;
    swaFullCache?: boolean;
    threads?: number;
    // Sampling parameters (required — defaults presumably set by yargs builder).
    temperature: number;
    minP: number;
    topK: number;
    topP: number;
    seed?: number;
    gpuLayers?: number;
    // Repetition-penalty controls.
    repeatPenalty: number;
    lastTokensRepeatPenalty: number;
    penalizeRepeatingNewLine: boolean;
    repeatFrequencyPenalty?: number;
    repeatPresencePenalty?: number;
    maxTokens: number;
    // Speculative-decoding draft model options.
    tokenPredictionDraftModel?: string;
    tokenPredictionModelContextSize?: number;
    debug: boolean;
    /** NUMA strategy to use when loading the model. */
    numa?: LlamaNuma;
    meter: boolean;
    timing: boolean;
    noMmap: boolean;
    printTimings: boolean;
};

/** The yargs command module implementing the `complete` CLI command. */
export declare const CompleteCommand: CommandModule<object, CompleteCommand>;
export {};