@continue-reasoning/mini-agent
A platform-agnostic framework for building autonomous AI agents with tool-execution capabilities
TypeScript
/**
* @fileoverview Simplified GeminiChat Implementation
*
* This module provides a GeminiChat implementation that follows our unified IChat framework
* using Gemini's Chat API for maximum simplicity and consistency.
*
* Key design principles:
* - Uses Gemini Chat API directly (no over-abstraction)
* - Follows unified event streaming (Start → Delta → Done → Complete)
* - Automatic history management on Done events
* - Final-only token usage strategy
* - Single content per message (no complex multi-part)
*/
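/*
 * Unified event flow for every request (event names as described in this
 * file's comments; the concrete discriminants on LLMResponse are not part of
 * this declaration):
 *
 *   LLMStart          - one per prompt, marks the request boundary
 *   LLMChunk*Delta    - zero or more incremental fragments per chunk
 *   LLMChunk*Done     - a finished chunk, auto-appended to history
 *   LLMComplete       - terminal event, carries the final-only token usage
 */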
import type { Content as GeminiContent } from '@google/genai';
import { IChat, LLMResponse, ChunkItem, MessageItem, IChatConfig } from './interfaces';
import { ITokenTracker, ITokenUsage } from './interfaces';
type GeminiMessage = GeminiContent;
/**
* GeminiChat - Simplified implementation using Chat API
*
* This class eliminates the complex over-abstraction of the previous implementation
* and directly uses Gemini's Chat API while conforming to our unified IChat framework.
*/
export declare class GeminiChat implements IChat<GeminiMessage> {
private readonly chatConfig;
private ai;
private history;
private tokenTracker;
private sendPromise;
private isCurrentlyProcessing;
private logger;
constructor(chatConfig: IChatConfig);
/**
* Initialize history from config, converting old format if necessary
*/
private initializeHistory;
/**
* Convert old history format (parts array) to new format (single content)
*
* Strategy: Split each multi-part message into multiple single-content messages
*/
private convertHistoryToNewFormat;
/**
* Split multi-part message into multiple single-content messages
*/
private splitPartsToMessages;
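/*
 * Conversion sketch. The old format is Gemini's native Content shape
 * (role + parts[]); the exact field names of the new single-content
 * MessageItem are assumed for illustration, not declared here:
 *
 *   // old: one message carrying many parts
 *   { role: 'user', parts: [{ text: 'Hi' }, { text: 'Run the tests' }] }
 *   // new: one single-content message per part
 *   { role: 'user', content: 'Hi' }
 *   { role: 'user', content: 'Run the tests' }
 */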
/**
* Create a fresh chat instance with current history
*
* We create a new instance each time to ensure history synchronization
*/
private createChatInstance;
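/*
 * A sketch of what this likely does internally, assuming the @google/genai
 * SDK's `ai.chats.create` entry point (the calls below are an assumption,
 * not part of this declaration):
 *
 *   const chat = this.ai.chats.create({
 *     model: this.chatConfig.model,
 *     history: this.convertHistoryToGemini(), // rebuilt on every call, so the
 *   });                                       // SDK never holds stale history
 */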
/**
* CORE METHOD: Send message and get streaming response
*
* Implements our unified streaming pattern: Start → Delta → Done → Complete
*/
sendMessageStream(message: MessageItem, promptId: string): Promise<AsyncGenerator<LLMResponse>>;
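/*
 * Usage sketch. Note the two-step shape: await the Promise to obtain the
 * generator, then iterate it. The message literal below is an assumed
 * MessageItem shape based on the single-content design note above:
 *
 *   const stream = await chat.sendMessageStream(
 *     { role: 'user', content: 'Summarize the diff' },
 *     'prompt-42', // promptId ties the emitted events back to this request
 *   );
 *   for await (const event of stream) {
 *     // LLMStart → LLMChunk*Delta → LLMChunk*Done → LLMComplete
 *   }
 */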
/**
* Create streaming response with unified event flow
*
* This follows our standard pattern: LLMStart → LLMChunk*Delta → LLMChunk*Done → LLMComplete
*/
private createStreamingResponse;
/**
* Convert our MessageItem to Gemini format
*/
convertToProviderMessage(message: MessageItem): GeminiMessage;
/**
* Convert our MessageItem to Gemini message format (for streaming)
*/
private convertMessageToGemini;
/**
* Convert ContentPart to GeminiPart
*/
private convertContentPartToGeminiPart;
/**
* Convert ChunkItem to MessageItem for history
*
* CRITICAL: This is called automatically for all Done events
*/
convertFromChunkItems(chunk: ChunkItem, role: 'user' | 'assistant'): MessageItem;
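/*
 * History-append sketch (the ChunkItem and MessageItem shapes below are
 * assumptions for illustration):
 *
 *   // a text chunk that has just emitted its Done event ...
 *   const chunk = { type: 'text', text: 'Hello!' };
 *   // ... becomes a single-content assistant message in history
 *   const item = chat.convertFromChunkItems(chunk, 'assistant');
 *   // => { role: 'assistant', content: 'Hello!' }
 */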
/**
* Convert our history to Gemini format
*/
private convertHistoryToGemini;
getHistory(_curated?: boolean): MessageItem[];
clearHistory(): void;
addHistory(content: MessageItem): void;
setHistory(history: MessageItem[]): void;
setSystemPrompt(systemPrompt: string): void;
getSystemPrompt(): string | undefined;
getTokenUsage(): ITokenUsage;
getTokenTracker(): ITokenTracker;
isProcessing(): boolean;
getModelInfo(): {
model: string;
tokenLimit: number;
};
handleModelFallback(fallbackModel: string): boolean;
getUsageSummary(): string;
}
export {};
//# sourceMappingURL=geminiChat.d.ts.map