node-llama-cpp
Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level.
TypeScript
import { ChatWrapper } from "../ChatWrapper.js";
import { ChatModelFunctions, ChatWrapperGenerateContextStateOptions, ChatWrapperGeneratedContextState, ChatWrapperSettings } from "../types.js";
export declare class Llama3ChatWrapper extends ChatWrapper {
    /** Human-readable name identifying this chat wrapper */
    readonly wrapperName: string;

    /** Chat wrapper settings, including function calling configuration */
    readonly settings: ChatWrapperSettings;

    constructor({ parallelFunctionCalling }?: {
        /**
         * Whether the model may call multiple functions in parallel.
         * Defaults to `true`
         */
        parallelFunctionCalling?: boolean;
    });

    /** Generates the context state (formatted prompt and related metadata) for the given chat history */
    generateContextState({ chatHistory, availableFunctions, documentFunctionParams }: ChatWrapperGenerateContextStateOptions): ChatWrapperGeneratedContextState;

    /** Generates the system prompt text that documents the available functions for the model */
    generateAvailableFunctionsSystemText(availableFunctions: ChatModelFunctions, { documentParams }: {
        documentParams?: boolean;
    }): import("../utils/LlamaText.js")._LlamaText;
}
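
A minimal usage sketch, assuming the node-llama-cpp v3 API (getLlama, LlamaChatSession, context.getSequence()) and a hypothetical local model path; it shows passing a Llama3ChatWrapper instance to a chat session so the Llama 3 prompt format is used explicitly:

import {getLlama, LlamaChatSession, Llama3ChatWrapper} from "node-llama-cpp";

// Hypothetical path to a local Llama 3 GGUF model file
const modelPath = "models/llama-3-8b-instruct.Q4_K_M.gguf";

const llama = await getLlama();
const model = await llama.loadModel({modelPath});
const context = await model.createContext();

// Use the Llama 3 chat wrapper explicitly; parallelFunctionCalling defaults to true
const session = new LlamaChatSession({
    contextSequence: context.getSequence(),
    chatWrapper: new Llama3ChatWrapper({parallelFunctionCalling: true})
});

const answer = await session.prompt("Hi there");
console.log(answer);

Without an explicit chatWrapper, the library normally resolves a suitable wrapper from the model automatically; passing Llama3ChatWrapper pins the prompt format regardless of that detection.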