node-llama-cpp
Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level.
TypeScript
import { ChatWrapper } from "../../ChatWrapper.js";
import { Tokenizer } from "../../types.js";
import { JinjaTemplateChatWrapperOptions } from "../generic/JinjaTemplateChatWrapper.js";
/**
 * Checks whether rendering chat history through the given Jinja template
 * produces output equivalent to that of the given specialized `ChatWrapper`,
 * so the specialized wrapper (with its extra functionality) can be used
 * in place of a generic Jinja template wrapper.
 * @param jinjaTemplateWrapperOptions - options (including the Jinja template) to test
 * @param specializedChatWrapper - the specialized chat wrapper to compare against
 * @param tokenizer - optional tokenizer used when comparing the rendered outputs
 */
export declare function isJinjaTemplateEquivalentToSpecializedChatWrapper(jinjaTemplateWrapperOptions: JinjaTemplateChatWrapperOptions, specializedChatWrapper: ChatWrapper, tokenizer?: Tokenizer): boolean;
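
A minimal usage sketch follows. The relative import paths, the placeholder template string, and the choice of Llama3ChatWrapper are illustrative assumptions rather than details taken from this file; it also assumes JinjaTemplateChatWrapperOptions accepts the Jinja template as a template string, as suggested by its name.

// Hedged usage sketch — import paths and the template value are assumptions.
import { isJinjaTemplateEquivalentToSpecializedChatWrapper } from "./isJinjaTemplateEquivalentToSpecializedChatWrapper.js";
import { Llama3ChatWrapper } from "../Llama3ChatWrapper.js"; // assumed relative path

// A Jinja chat template, e.g. one read from a GGUF model's metadata (placeholder here)
const jinjaTemplate = "{{- bos_token }}{%- for message in messages %}...{%- endfor %}";

// If the template renders equivalently, the specialized wrapper can be preferred
// over a generic Jinja template wrapper
const isEquivalent = isJinjaTemplateEquivalentToSpecializedChatWrapper(
    { template: jinjaTemplate },
    new Llama3ChatWrapper()
);

The optional tokenizer parameter is omitted above; passing one presumably lets the comparison account for tokenizer-level differences between the two rendered outputs.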