node-llama-cpp
Version:
Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level.
8 lines (7 loc) • 323 B
TypeScript
/**
 * Resolves the full set of parts of a split GGUF model from a single part's path or URI.
 *
 * NOTE(review): implementation not visible here — presumably returns the paths/URIs of
 * all parts of the split model (a non-split file likely yields a single-element array);
 * confirm against the implementation.
 *
 * @param ggufPathOrUri - path or URI of a (possibly split) `.gguf` file
 * @returns the list of paths/URIs of the model's parts
 */
export declare function resolveSplitGgufParts(ggufPathOrUri: string): string[];
/**
 * Extracts split-part information encoded in a GGUF file path.
 *
 * Returns `null` when the path does not match the split-parts naming pattern.
 *
 * @param ggufPath - path of a `.gguf` file
 * @returns an object describing the split, or `null` if the path is not a split part:
 * - `part` - this file's part number (NOTE(review): likely 1-based per GGUF split
 *   naming convention — confirm against the implementation)
 * - `parts` - total number of parts in the split model
 * - `matchLength` - length of the matched split-suffix in the path
 */
export declare function getGgufSplitPartsInfo(ggufPath: string): {
    part: number;
    parts: number;
    matchLength: number;
} | null;
/**
 * Builds the filename for a specific part of a split GGUF model.
 *
 * NOTE(review): implementation not visible — presumably encodes `part` and `parts`
 * into the filename using the standard GGUF split naming pattern; confirm.
 *
 * @param filename - base `.gguf` filename to derive the part filename from
 * @param part - the part number to encode
 * @param parts - the total number of parts
 * @returns the filename for the given part
 */
export declare function createSplitPartFilename(filename: string, part: number, parts: number): string;