inference-server
Libraries and a server for building AI applications, with adapters to various native bindings for local inference. Integrate it into your application, or run it as a microservice.
import { ChatHistoryItem, LlamaTextJSON } from 'node-llama-cpp';
import { CompletionFinishReason, ChatMessage, ToolCallResult } from '../../types/index.js';
import { LlamaChatResult } from './types.js';
/** Maps a node-llama-cpp stop reason to the server's CompletionFinishReason. */
export declare function mapFinishReason(nodeLlamaCppFinishReason: LlamaChatResult['stopReason']): CompletionFinishReason;
/** Appends a function (tool) call and its result to a chat history, returning the updated history. */
export declare function addFunctionCallToChatHistory({ chatHistory, functionName, functionDescription, callParams, callResult, rawCall, startsNewChunk }: {
    chatHistory: ChatHistoryItem[];
    functionName: string;
    functionDescription?: string;
    callParams: any;
    callResult: ToolCallResult;
    rawCall?: LlamaTextJSON;
    startsNewChunk?: boolean;
}): ChatHistoryItem[];
/** Converts server-level ChatMessage objects into node-llama-cpp ChatHistoryItem entries. */
export declare function createChatMessageArray(messages: ChatMessage[]): ChatHistoryItem[];
/** Reads all GBNF grammar files in a directory; resolves to file contents keyed by file name. */
export declare function readGBNFFiles(directoryPath: string): Promise<Record<string, string>>;
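For orientation, a hypothetical usage sketch of these helpers. The import path './utils.js', the ChatMessage shape, and the ToolCallResult placeholder are assumptions for illustration, not part of this file:

// Hypothetical import path and message shapes; adapt to the package's actual exports.
import { createChatMessageArray, addFunctionCallToChatHistory, readGBNFFiles } from './utils.js';
import type { ChatMessage, ToolCallResult } from '../../types/index.js';

async function main() {
    // Assumed shape: { role, content }, as in most chat APIs.
    const messages = [
        { role: 'user', content: 'What is the weather in Berlin?' },
    ] as unknown as ChatMessage[];

    // Convert server-level messages into node-llama-cpp chat history items.
    let chatHistory = createChatMessageArray(messages);

    // Record a completed tool call so the model sees its result on the next turn.
    chatHistory = addFunctionCallToChatHistory({
        chatHistory,
        functionName: 'getWeather',
        functionDescription: 'Returns the current weather for a city',
        callParams: { city: 'Berlin' },
        // ToolCallResult's exact shape is not shown in this file; placeholder cast.
        callResult: { text: '22°C and sunny' } as unknown as ToolCallResult,
    });

    // Load every GBNF grammar in a directory, keyed by file name.
    const grammars = await readGBNFFiles('./grammars');
    console.log(Object.keys(grammars));
}

main();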