node-llama-cpp
Version:
Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level.
17 lines (16 loc) • 702 B
TypeScript
import { GgufReadOffset } from "../utils/GgufReadOffset.js";
import { GgufFileReader } from "./GgufFileReader.js";
/**
 * Options for constructing a {@link GgufFsFileReader}.
 */
type GgufFsFileReaderOptions = {
    /** Path to the GGUF file on the local filesystem. */
    filePath: string;
    /** Optional signal used to abort in-progress read operations. */
    signal?: AbortSignal;
};
/**
 * A {@link GgufFileReader} that reads GGUF metadata from a file on the
 * local filesystem.
 *
 * NOTE(review): declaration file only — behavioral notes below are inferred
 * from the visible signatures; confirm against the implementation.
 */
export declare class GgufFsFileReader extends GgufFileReader {
    /** Path of the GGUF file this reader was constructed with. */
    readonly filePath: string;
    /** Abort signal passed via the constructor options, if any. */
    private readonly _signal?;
    constructor({ filePath, signal }: GgufFsFileReaderOptions);
    /**
     * Reads `length` bytes starting at `offset` (a plain byte offset or a
     * mutable {@link GgufReadOffset} cursor).
     *
     * The union return type indicates the read may complete synchronously
     * (returning a `Buffer` directly — presumably when the range is already
     * buffered) or asynchronously (returning a `Promise<Buffer>`); callers
     * should handle both cases.
     */
    readByteRange(offset: number | GgufReadOffset, length: number): Buffer<ArrayBuffer> | Promise<Buffer<ArrayBuffer>>;
    /**
     * Ensures the requested byte range is available before a read.
     * Returns `undefined` when no work is needed, otherwise a `Promise`
     * that resolves once the range has been loaded.
     */
    protected ensureHasByteRange(offset: number | GgufReadOffset, length: number): Promise<void> | undefined;
    // Internal helper: grows the in-memory buffer up to a target offset.
    private _readToExpandBufferUpToOffset;
    // Internal helper backing the public readByteRange method.
    private _readByteRange;
}
export {};