node-llama-cpp
Run AI models locally on your machine with Node.js bindings for llama.cpp, and enforce a JSON schema on the model output at the generation level.
TypeScript
import { Llama } from "../../bindings/Llama.js";
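/**
 * Interactively prompt the user to choose a model, resolving with the chosen
 * model's file path.
 *
 * Inferred from the parameter names: `modelsDirectory` is where models are
 * searched for (and downloaded to), `allowLocalModels` includes models already
 * on disk, `downloadIntent` permits downloading a model that isn't available
 * locally, and `flashAttention`/`useMmap` are model-loading preferences taken
 * into account during selection.
 */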
export declare function interactivelyAskForModel({ llama, modelsDirectory, allowLocalModels, downloadIntent, flashAttention, useMmap }: {
llama: Llama;
modelsDirectory?: string;
allowLocalModels?: boolean;
downloadIntent?: boolean;
flashAttention?: boolean;
useMmap?: boolean;
}): Promise<string>;
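
A minimal usage sketch follows. It assumes an ES module context with top-level await; getLlama is node-llama-cpp's documented entry point for loading the bindings, while the import path for this helper is hypothetical, since its own import of "../../bindings/Llama.js" suggests it is an internal CLI utility rather than a public export.

import { getLlama } from "node-llama-cpp";
// Hypothetical path — adjust to wherever this internal module resolves in your setup.
import { interactivelyAskForModel } from "./cli/utils/interactivelyAskForModel.js";

// Load the llama.cpp bindings.
const llama = await getLlama();

// Ask the user to pick a model; every option name comes from the declaration above.
const modelPath = await interactivelyAskForModel({
    llama,
    modelsDirectory: "./models", // illustrative directory for local model files
    allowLocalModels: true,
    downloadIntent: true,
    flashAttention: false,
    useMmap: true
});

console.log("Selected model:", modelPath);

Per the ? markers in the declaration, every option except llama is optional, so a bare { llama } call is also valid.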