node-llama-cpp
Run AI models locally on your machine with Node.js bindings for llama.cpp, and enforce a JSON schema on the model's output at the generation level.
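As a quick sketch of that JSON-schema feature, the snippet below uses the library's getLlama/LlamaChatSession API to constrain generation with a schema-derived grammar. The model path and the schema are placeholders, not part of this module.

TypeScript
import { getLlama, LlamaChatSession } from "node-llama-cpp";

const llama = await getLlama();
const model = await llama.loadModel({ modelPath: "path/to/model.gguf" }); // placeholder path
const context = await model.createContext();
const session = new LlamaChatSession({ contextSequence: context.getSequence() });

// The grammar constrains sampling itself, so the output conforms to the
// schema at generation time instead of being validated afterwards.
const grammar = await llama.createGrammarForJsonSchema({
    type: "object",
    properties: {
        answer: { type: "string" }
    }
});

const response = await session.prompt("What is the capital of France?", { grammar });
console.log(grammar.parse(response)); // parsed object matching the schema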
TypeScript
import { ChatHistoryItem, ChatModelResponse } from "../../types.js";
import { LlamaText } from "../../utils/LlamaText.js";

/**
 * Splits a chat history into consecutive chunks of system, user, and model
 * texts, rendering each model response into a `LlamaText` via the
 * `generateModelResponseText` callback.
 */
export declare function chunkChatItems(chatHistory: readonly ChatHistoryItem[], { generateModelResponseText, joinAdjacentMessagesOfTheSameType }: {
    /** Renders a model response (text and/or function calls) into a `LlamaText` */
    generateModelResponseText: (modelResponse: ChatModelResponse["response"]) => LlamaText;
    /** When enabled, adjacent messages of the same type are joined into one chunk */
    joinAdjacentMessagesOfTheSameType?: boolean;
}): {
    system: LlamaText;
    user: LlamaText;
    model: LlamaText;
}[];
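For context, here is a usage sketch of chunkChatItems. The relative import path and the chat-item literals are assumptions read off the declaration above (node-llama-cpp's ChatHistoryItem is a union of system, user, and model messages), so treat this as illustrative rather than verified against the full codebase.

TypeScript
import { LlamaText } from "../../utils/LlamaText.js";
import type { ChatHistoryItem } from "../../types.js";
import { chunkChatItems } from "./chunkChatItems.js"; // assumed path to this module

const chatHistory: ChatHistoryItem[] = [
    { type: "system", text: "You are a helpful assistant." },
    { type: "user", text: "Hi!" },
    { type: "model", response: ["Hello! How can I help you?"] }
];

const chunks = chunkChatItems(chatHistory, {
    // Keep only the plain-text parts of a model response; a full renderer
    // would also serialize function-call items.
    generateModelResponseText: (modelResponse) => LlamaText(
        modelResponse.filter((item): item is string => typeof item === "string")
    ),
    joinAdjacentMessagesOfTheSameType: true
});

// Each chunk groups the system, user, and model texts of one exchange
for (const { system, user, model } of chunks)
    console.log(system.toString(), user.toString(), model.toString());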