node-llama-cpp
Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level.
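As a quick illustration of that schema-enforcement capability, here is a short sketch based on the library's documented v3 API; the model path and prompt are placeholders:

// Sketch: constrain generation to a JSON schema (model path is hypothetical).
import { getLlama, LlamaChatSession } from "node-llama-cpp";

const llama = await getLlama();
const model = await llama.loadModel({ modelPath: "path/to/model.gguf" });
const context = await model.createContext();
const session = new LlamaChatSession({ contextSequence: context.getSequence() });

// The grammar forces the model's output to match this schema during generation.
const grammar = await llama.createGrammarForJsonSchema({
    type: "object",
    properties: {
        answer: { type: "string" }
    }
});

const response = await session.prompt("Reply with JSON.", { grammar });
const parsed = grammar.parse(response); // the schema-conforming object

The declaration below comes from the library's internals: it extracts function-calling settings from a model's Jinja chat template.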
import { ChatHistoryItem, ChatModelFunctions, ChatWrapperSettings } from "../../../types.js";
import { UniqueIdGenerator } from "./UniqueIdGenerator.js";

/**
 * Extracts function-calling settings from a Jinja chat template by rendering it
 * through the given `renderTemplate` callback and inspecting the rendered output.
 */
export declare function extractFunctionCallSettingsFromJinjaTemplate({ idsGenerator, renderTemplate }: {
    idsGenerator: UniqueIdGenerator;
    renderTemplate({}: {
        chatHistory: ChatHistoryItem[];
        functions: ChatModelFunctions;
        additionalParams: Record<string, unknown>;
        stringifyFunctionParams: boolean;
        stringifyFunctionResults: boolean;
        combineModelMessageAndToolCalls: boolean;
        squashModelTextResponses?: boolean;
    }): string;
}): {
    /** `null` when no function-calling syntax could be detected in the template */
    settings: ChatWrapperSettings["functions"] | null;
    stringifyParams: boolean;
    stringifyResult: boolean;
    combineModelMessageAndToolCalls: boolean;
};
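For context, a minimal usage sketch of this extractor follows. It is hypothetical wiring: the import paths, the no-argument UniqueIdGenerator constructor, and the template-variable mapping (messages/tools) are assumptions, not confirmed by this declaration file.

// Usage sketch: wiring the extractor to a Jinja chat template renderer.
import { Template } from "@huggingface/jinja";
import { extractFunctionCallSettingsFromJinjaTemplate } from "./extractFunctionCallSettingsFromJinjaTemplate.js";
import { UniqueIdGenerator } from "./UniqueIdGenerator.js";

const chatTemplateText = "..."; // placeholder: the model's Jinja chat template string
const template = new Template(chatTemplateText);

const { settings, stringifyParams, stringifyResult, combineModelMessageAndToolCalls } =
    extractFunctionCallSettingsFromJinjaTemplate({
        idsGenerator: new UniqueIdGenerator(), // assumed no-argument constructor
        renderTemplate({ chatHistory, functions, additionalParams }) {
            // Render the probe chat history and function definitions the extractor
            // passes in; a real integration would also honor the stringify*/combine*
            // flags when building the template context.
            return template.render({
                messages: chatHistory,
                tools: functions,
                ...additionalParams
            });
        }
    });

// `settings` is null when no function-calling syntax was detected in the template.
console.log({ settings, stringifyParams, stringifyResult, combineModelMessageAndToolCalls });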