UNPKG

node-llama-cpp

Version:

Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level.

13 lines (12 loc) 511 B
import { LlamaContext } from "../../evaluator/LlamaContext/LlamaContext.js";

/**
 * Print a block of common informational lines for the given context.
 *
 * NOTE(review): this is a declaration file — the implementation is not
 * visible here. Presumably the function logs details (mmap/direct-IO usage,
 * batch size, token meter, BOS/EOS) about `context` and, when provided,
 * `draftContext`; confirm against the implementation module.
 *
 * @param options.context - the context to describe (required)
 * @param options.draftContext - optional draft-model context to describe as well
 * @param options.minTitleLength - optional minimum title width for alignment
 * @param options.useMmap - whether to report mmap usage
 * @param options.useDirectIo - whether to report direct-IO usage
 * @param options.logBatchSize - whether to include the batch size
 * @param options.tokenMeterEnabled - whether the token meter is enabled
 * @param options.printBos - whether to print the BOS token info
 * @param options.printEos - whether to print the EOS token info
 * @returns a `Promise` resolving to a number (meaning not visible from this
 * declaration — possibly the rendered title length; verify in the implementation)
 */
export declare function printCommonInfoLines({
    context,
    draftContext,
    minTitleLength,
    useMmap,
    useDirectIo,
    logBatchSize,
    tokenMeterEnabled,
    printBos,
    printEos
}: {
    context: LlamaContext;
    draftContext?: LlamaContext;
    minTitleLength?: number;
    useMmap?: boolean;
    useDirectIo?: boolean;
    logBatchSize?: boolean;
    tokenMeterEnabled?: boolean;
    printBos?: boolean;
    printEos?: boolean;
}): Promise<number>;