node-llama-cpp
Version:
Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level.
9 lines (8 loc) • 365 B
TypeScript
import { GgufFileReader } from "../fileReaders/GgufFileReader.js";
import { GgufFileInfo } from "../types/GgufFileInfoTypes.js";
/**
 * Parses a GGUF file through the given `fileReader` and resolves with the
 * parsed file information.
 *
 * NOTE(review): only the signature is visible here — the descriptions of the
 * option flags below are inferred from their names; confirm against the
 * implementation.
 *
 * @param options.fileReader - reader providing access to the GGUF file's bytes
 * @param options.readTensorInfo - presumably controls whether tensor info is
 *   read from the file — TODO confirm (including its default)
 * @param options.ignoreKeys - presumably metadata keys to skip while parsing —
 *   verify against the implementation
 * @param options.logWarnings - presumably whether parse warnings are logged —
 *   verify against the implementation
 * @returns a promise resolving to the parsed {@link GgufFileInfo}
 */
export declare function parseGguf({ fileReader, readTensorInfo, ignoreKeys, logWarnings }: {
fileReader: GgufFileReader;
readTensorInfo?: boolean;
ignoreKeys?: string[];
logWarnings?: boolean;
}): Promise<GgufFileInfo>;