node-llama-cpp
Run AI models locally on your machine with Node.js bindings for llama.cpp, and enforce a JSON schema on the model's output at the generation level.
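As context for the declaration below, here is a minimal sketch of that schema-enforced generation workflow using the library's documented v3 API. The model path is a placeholder and the schema is an illustrative assumption, not part of the original file:

```typescript
import {getLlama, LlamaChatSession} from "node-llama-cpp";

// Load a local GGUF model (the path is a placeholder).
const llama = await getLlama();
const model = await llama.loadModel({modelPath: "path/to/model.gguf"});
const context = await model.createContext();
const session = new LlamaChatSession({contextSequence: context.getSequence()});

// Build a grammar from a JSON schema so the generation itself is
// constrained to schema-valid JSON; this schema is just an example.
const grammar = await llama.createGrammarForJsonSchema({
    type: "object",
    properties: {
        answer: {type: "string"}
    }
} as const);

const response = await session.prompt("Say hello in JSON.", {grammar});
console.log(grammar.parse(response));
```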
```typescript
import { LlamaContext } from "../../evaluator/LlamaContext/LlamaContext.js";

export declare function printCommonInfoLines({ context, draftContext, minTitleLength, useMmap, logBatchSize, tokenMeterEnabled, printBos, printEos }: {
    context: LlamaContext;
    draftContext?: LlamaContext;
    minTitleLength?: number;
    useMmap?: boolean;
    logBatchSize?: boolean;
    tokenMeterEnabled?: boolean;
    printBos?: boolean;
    printEos?: boolean;
}): Promise<number>;
```
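The declaration comes from the CLI utilities inside the node-llama-cpp source tree rather than the public package exports. A hedged sketch of how it might be called, assuming an already-created context; the relative import path is hypothetical, and the meaning of the resolved number is not specified by the type alone:

```typescript
import {getLlama} from "node-llama-cpp";
// Hypothetical relative path: the helper lives in the library's own
// source tree and is not exported from the "node-llama-cpp" package.
import {printCommonInfoLines} from "./utils/printCommonInfoLines.js";

const llama = await getLlama();
const model = await llama.loadModel({modelPath: "path/to/model.gguf"});
const context = await model.createContext();

// Print the shared info lines for this context. Every option besides
// `context` is optional per the declaration above; what the resolved
// number represents is an open question from the type alone.
const result = await printCommonInfoLines({
    context,
    useMmap: true,
    logBatchSize: true,
    printBos: true,
    printEos: true
});
console.log(result);
```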