node-llama-cpp
Version:
Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level.
20 lines (19 loc) • 994 B
TypeScript
import { ChatWrapper } from "../ChatWrapper.js";
import { ChatWrapperGenerateContextStateOptions, ChatWrapperGeneratedContextState } from "../types.js";
/**
 * Chat wrapper that renders chat history using Falcon-style plain-text
 * message titles (user / model-response / middle-system section headers,
 * per the constructor options below).
 *
 * This chat wrapper is not safe against chat syntax injection attacks
 * ([learn more](https://node-llama-cpp.withcat.ai/guide/llama-text#input-safety-in-node-llama-cpp)).
 */
export declare class FalconChatWrapper extends ChatWrapper {
/** Identifier for this wrapper implementation (overrides the `ChatWrapper` base field). */
readonly wrapperName: string;
/**
 * All options are optional; defaults are applied by the implementation
 * (not visible in this declaration file — consult the source for the
 * concrete default title strings).
 *
 * @param userMessageTitle - Title text prefixed to user messages.
 * @param modelResponseTitle - Title text prefixed to model responses.
 * @param middleSystemMessageTitle - Title text used for system messages
 * that appear between other messages (rather than at the start).
 * @param allowSpecialTokensInTitles - When `true`, title strings may
 * contain model special tokens; presumably they are otherwise treated
 * as plain text — verify against the implementation.
 */
constructor({ userMessageTitle, modelResponseTitle, middleSystemMessageTitle, allowSpecialTokensInTitles }?: {
userMessageTitle?: string;
modelResponseTitle?: string;
middleSystemMessageTitle?: string;
allowSpecialTokensInTitles?: boolean;
});
/** The title used for user messages, as resolved by the constructor. */
get userMessageTitle(): string;
/** The title used for model responses, as resolved by the constructor. */
get modelResponseTitle(): string;
/** The title used for system messages placed between other messages, as resolved by the constructor. */
get middleSystemMessageTitle(): string;
/**
 * Builds the wrapper-specific context state (prompt text/tokens and related
 * generation settings) for the given chat history and available functions.
 * Overrides the `ChatWrapper` base implementation.
 */
generateContextState({ chatHistory, availableFunctions, documentFunctionParams }: ChatWrapperGenerateContextStateOptions): ChatWrapperGeneratedContextState;
}