node-llama-cpp
Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level.
TypeScript
import { ModelDownloadEndpoints } from "./modelDownloadEndpoints.js";
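/** Access tokens for downloading model files, such as a Hugging Face access token for gated or private model repos. */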
export type ModelFileAccessTokens = {
huggingFace?: string;
};
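/**
 * Resolves the candidate HTTP header sets to try, in order, when downloading the model
 * file at `modelUrl`, merging `baseHeaders` with authentication headers derived from
 * the given access tokens (and any custom download endpoints).
 */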
export declare function resolveModelFileAccessTokensTryHeaders(
    modelUrl: string,
    tokens?: ModelFileAccessTokens,
    endpoints?: ModelDownloadEndpoints,
    baseHeaders?: Record<string, string>
): Promise<Record<string, string>[]>;
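For context, here is a minimal sketch of how these candidate header sets might be consumed when downloading a model file. This is an internal utility rather than part of the package's documented public API, so the import path, the exact semantics of the returned header sets, and the HF_TOKEN environment variable are all assumptions here:

// Hypothetical import path: this module is internal to node-llama-cpp and its
// location in the build output may differ between versions.
import { resolveModelFileAccessTokensTryHeaders } from "node-llama-cpp/dist/utils/modelFileAccessTokens.js";

const modelUrl = "https://huggingface.co/org/repo/resolve/main/model.gguf";

// Presumably returns an ordered list of header sets to attempt, e.g. `baseHeaders`
// augmented with an Authorization header derived from the Hugging Face token.
const tryHeaders = await resolveModelFileAccessTokensTryHeaders(
    modelUrl,
    { huggingFace: process.env.HF_TOKEN },
    undefined, // use the default download endpoints
    { "User-Agent": "my-downloader/1.0" }
);

// Try each header set in order until one succeeds.
for (const headers of tryHeaders) {
    const res = await fetch(modelUrl, { headers });
    if (res.ok) {
        // ...stream res.body to disk...
        break;
    }
}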