node-llama-cpp

Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model's output at the generation level.
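The schema enforcement mentioned above works by constraining which tokens the model may emit during generation, so the output always conforms. A minimal sketch, assuming node-llama-cpp v3's `getLlama`, `createGrammarForJsonSchema`, and `LlamaChatSession` exports; the model path is hypothetical:

import { getLlama, LlamaChatSession } from "node-llama-cpp";

// Load the bindings and a local GGUF model (path is hypothetical).
const llama = await getLlama();
const model = await llama.loadModel({ modelPath: "models/llama-3.1-8b.Q4_K_M.gguf" });
const context = await model.createContext();
const session = new LlamaChatSession({ contextSequence: context.getSequence() });

// Build a grammar from a JSON schema; generation is constrained to conform to it.
const grammar = await llama.createGrammarForJsonSchema({
    type: "object",
    properties: {
        answer: { type: "string" },
        confidence: { type: "number" }
    }
});

const response = await session.prompt("Summarize llama.cpp in one sentence.", { grammar });
console.log(grammar.parse(response)); // parsed object matching the schema

The declaration file below defines Llama3_1ChatWrapper, the chat wrapper that renders chat history into the Llama 3.1 prompt format: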

import { ChatWrapper } from "../ChatWrapper.js";
import { ChatHistoryItem, ChatModelFunctions, ChatWrapperGenerateContextStateOptions, ChatWrapperGeneratedContextState, ChatWrapperSettings } from "../types.js";

export declare class Llama3_1ChatWrapper extends ChatWrapper {
    readonly wrapperName: string;
    readonly cuttingKnowledgeDate?: Date | (() => Date) | null;
    readonly todayDate: Date | (() => Date) | null;
    readonly noToolInstructions: boolean;
    readonly settings: ChatWrapperSettings;
    constructor(options?: {
        /**
         * Set to `null` to disable
         *
         * Defaults to December 2023
         */
        cuttingKnowledgeDate?: Date | (() => Date) | number | string | null;
        /**
         * Set to `null` to disable
         *
         * Defaults to current date
         */
        todayDate?: Date | (() => Date) | number | string | null;
        noToolInstructions?: boolean;
    });
    addAvailableFunctionsSystemMessageToHistory(history: readonly ChatHistoryItem[], availableFunctions?: ChatModelFunctions, { documentParams }?: {
        documentParams?: boolean;
    }): readonly ChatHistoryItem[];
    generateContextState({ chatHistory, availableFunctions, documentFunctionParams }: ChatWrapperGenerateContextStateOptions): ChatWrapperGeneratedContextState;
    generateAvailableFunctionsSystemText(availableFunctions: ChatModelFunctions, { documentParams }: {
        documentParams?: boolean;
    }): import("../utils/LlamaText.js")._LlamaText;
    prependPreambleToChatHistory(chatHistory: readonly ChatHistoryItem[]): readonly ChatHistoryItem[];
}
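For context, a sketch of how this wrapper might be wired into a chat session, assuming node-llama-cpp v3's session API and that `Llama3_1ChatWrapper` is re-exported from the package root; the model path is hypothetical:

import { getLlama, LlamaChatSession, Llama3_1ChatWrapper } from "node-llama-cpp";

const llama = await getLlama();
const model = await llama.loadModel({ modelPath: "models/llama-3.1-8b.Q4_K_M.gguf" }); // hypothetical path
const context = await model.createContext();

const session = new LlamaChatSession({
    contextSequence: context.getSequence(),
    // The wrapper controls how chat history is rendered into Llama 3.1's prompt format.
    chatWrapper: new Llama3_1ChatWrapper({
        cuttingKnowledgeDate: new Date("2023-12-01"), // the documented default is December 2023
        todayDate: () => new Date(),                  // a function is accepted, per the declaration above
        noToolInstructions: true                      // omit the built-in tool-use instructions
    })
});

console.log(await session.prompt("Hi there"));

Per the JSDoc comments in the declaration, passing `null` for `cuttingKnowledgeDate` or `todayDate` disables that part of the prompt.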