node-llama-cpp
Run AI models locally on your machine with Node.js bindings for llama.cpp. Enforce a JSON schema on the model output at the generation level.
import { ChatHistoryItem, Tokenizer } from "../../../../types.js";
import { ChatWrapper } from "../../../../ChatWrapper.js";
/**
 * A context shift strategy that, as its name describes, erases the first
 * model response(s) while keeping the first system message, so the system
 * prompt stays in effect after the history is trimmed to fit the token budget.
 */
export declare function eraseFirstResponseAndKeepFirstSystemChatContextShiftStrategy({ chatHistory, maxTokensCount, tokenizer, chatWrapper, lastShiftMetadata }: {
    /** The chat history to trim */
    chatHistory: ChatHistoryItem[];
    /** The maximum number of tokens the shifted history may occupy */
    maxTokensCount: number;
    /** Tokenizer used to measure the token count of the history */
    tokenizer: Tokenizer;
    /** Chat wrapper used to render the history into tokens */
    chatWrapper: ChatWrapper;
    /** The `metadata` returned by the previous shift, if any */
    lastShiftMetadata?: object | null;
}): Promise<{
    /** The trimmed chat history */
    chatHistory: ChatHistoryItem[];
    /** Pass this as `lastShiftMetadata` on the next shift */
    metadata: CalculationMetadata;
}>;
type CalculationMetadata = {
    /** Number of characters removed from the history */
    removedCharactersNumber: number;
};
export {};
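Since `lastShiftMetadata` accepts the `metadata` returned by a previous call, a caller can thread that value through successive shifts. Below is a minimal, hypothetical sketch of that loop. This strategy is an internal module rather than a documented public API, so the import path, the `maxTokensCount` value, and the `tokenizer`/`chatWrapper` values (declared as placeholders rather than constructed) are all assumptions for illustration.

import { ChatHistoryItem, Tokenizer } from "../../../../types.js";
import { ChatWrapper } from "../../../../ChatWrapper.js";
// Assumed import path for the internal strategy module:
import { eraseFirstResponseAndKeepFirstSystemChatContextShiftStrategy } from "./eraseFirstResponseAndKeepFirstSystemChatContextShiftStrategy.js";

// Placeholders standing in for values obtained from a loaded model/session;
// how they are constructed is out of scope for this sketch.
declare const chatHistory: ChatHistoryItem[];
declare const tokenizer: Tokenizer;
declare const chatWrapper: ChatWrapper;

// First shift: no previous metadata yet.
const firstShift = await eraseFirstResponseAndKeepFirstSystemChatContextShiftStrategy({
    chatHistory,
    maxTokensCount: 2048, // example token budget (assumption)
    tokenizer,
    chatWrapper,
    lastShiftMetadata: null
});

// A later shift: feed the previous metadata back in so the strategy can
// account for what it already removed.
const secondShift = await eraseFirstResponseAndKeepFirstSystemChatContextShiftStrategy({
    chatHistory: firstShift.chatHistory,
    maxTokensCount: 2048,
    tokenizer,
    chatWrapper,
    lastShiftMetadata: firstShift.metadata
});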