node-llama-cpp
Version:
Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level.
16 lines (15 loc) • 595 B
TypeScript
import { ParsedModelUri } from "./parseModelUri.js";
/**
 * Discriminated union (on `type`) describing what a model destination
 * string resolved to: a plain remote URL, a model URI (kept alongside its
 * parsed form), or a local file path.
 */
export type ResolveModelDestination = {
type: "url";
/** The remote URL the model can be fetched from. */
url: string;
} | {
type: "uri";
// NOTE(review): optional — presumably the download URL derived from the
// URI when one can be computed; confirm against the resolver implementation.
url?: string;
/** The original model URI string. */
uri: string;
/** Structured result of parsing `uri` (see ./parseModelUri.js). */
parsedUri: ParsedModelUri;
} | {
type: "file";
/** Path to the model on the local filesystem. */
path: string;
};
/**
 * Classifies a model destination string as a remote URL, a model URI, or a
 * local file path, returning the corresponding `ResolveModelDestination`
 * variant.
 *
 * @param modelDestination - The raw model reference string to classify.
 * @param convertUrlToUri - NOTE(review): presumably converts a recognized
 * URL into the `"uri"` variant when possible — confirm in the implementation.
 */
export declare function resolveModelDestination(modelDestination: string, convertUrlToUri?: boolean): ResolveModelDestination;
/**
 * Resolves a model destination string down to a concrete local file path or
 * a downloadable URL, together with its classified destination.
 *
 * @param modelDestination - The raw model reference string to resolve.
 * @param optionHeaders - Optional HTTP headers — NOTE(review): presumably
 * applied when network access is needed during resolution; confirm against
 * the implementation.
 * @returns A tuple of the classified destination and the resolved file path
 * or URL string.
 */
export declare function resolveModelArgToFilePathOrUrl(modelDestination: string, optionHeaders?: Record<string, string>): Promise<[resolvedModelDestination: ResolveModelDestination, filePathOrUrl: string]>;