@continue-reasoning/agent

A platform-agnostic AI agent framework for building autonomous AI agents with tool execution capabilities

geminiChat.d.ts (230 lines, 7.77 kB)
/**
 * @fileoverview Platform-agnostic GeminiChat implementation
 *
 * This module provides a GeminiChat implementation that follows the IChat interface
 * and integrates with our Agent framework. It references the core package patterns
 * but uses our own type system and token tracking.
 *
 * Key features:
 * - Streaming-first approach for real-time responses
 * - Integrated token tracking with ITokenTracker
 * - Dual history system (comprehensive vs curated)
 * - Platform-agnostic content types
 * - Robust error handling and validation
 */
import { IChat, ITokenTracker, ITokenUsage, ChatMessage, LLMResponse, ConversationContent, IChatConfig } from './interfaces.js';
/**
 * GeminiChat implementation using our platform-agnostic interfaces
 *
 * This class provides streaming chat functionality with integrated token tracking
 * and conversation history management. It implements the IChat interface and works
 * with our ConversationContent type system while interfacing with Google's Gemini API.
 *
 * Key implementation details:
 * - Uses Google's GenerativeAI SDK directly
 * - Converts between our types and Gemini's types
 * - Maintains conversation history in our format
 * - Provides real-time token tracking
 * - Supports streaming and non-streaming responses
 */
export declare class GeminiChat implements IChat {
    private readonly chatConfig;
    private history;
    private tokenTracker;
    private sendPromise;
    private isCurrentlyProcessing;
    private aiClient;
    private contentGenerator;
    private generateContentConfig;
    private logger;
    constructor(chatConfig: IChatConfig);
    /**
     * Send a message and get streaming response
     *
     * Implements the IChat interface for streaming message sending.
     * Converts our ChatMessage format to Gemini's format and processes
     * the streaming response.
     *
     * @param message - Message in our ChatMessage format
     * @param promptId - Unique identifier for this prompt
     * @returns AsyncGenerator yielding LLMResponse objects
     */
    sendMessageStream(message: ChatMessage, promptId: string): Promise<AsyncGenerator<LLMResponse>>;
    /**
     * Create streaming response with internal initialization
     *
     * This method immediately returns an AsyncGenerator and handles all initialization
     * (connection, auth, retries) internally within the generator. This eliminates
     * the initial await delay and provides true streaming from the first moment.
     */
    private createStreamingResponse;
    /**
     * Internal stream processing without separate async generator return
     *
     * This is the core streaming logic extracted to work directly within
     * the main streaming generator.
     */
    private processStreamResponseInternal;
    /**
     * Validate LLM response in our format
     *
     * Checks if the response contains valid content that should be
     * included in conversation history.
     *
     * @param response - LLM response to validate
     * @returns True if response is valid
     */
    private isValidLLMResponse;
    /**
     * Validate conversation content
     *
     * Validates content structure and ensures it contains meaningful data.
     * Filters out empty text and invalid parts.
     *
     * @param content - ConversationContent to validate
     * @returns True if content is valid
     */
    private isValidConversationContent;
    /**
     * Record history after successful interaction
     *
     * Adds user input and assistant response to conversation history,
     * maintaining proper role alternation and filtering invalid content.
     *
     * @param userInput - User input in our format
     * @param modelOutput - Assistant output in our format
     */
    private recordHistory;
    /**
     * Extract curated history (valid interactions only)
     *
     * Filters conversation history to include only valid user-assistant
     * interactions, removing any turns with invalid responses.
     *
     * @param history - Full conversation history
     * @returns Curated history with only valid interactions
     */
    private extractCuratedHistory;
    /**
     * Get conversation history
     *
     * Returns conversation history in our ConversationContent format.
     * Can optionally return only curated (valid) history.
     *
     * @param curated - Whether to return only valid interactions
     * @returns Array of conversation content
     */
    getHistory(curated?: boolean): ConversationContent[];
    /**
     * Clear conversation history
     */
    clearHistory(): void;
    /**
     * Add content to conversation history
     *
     * Adds a single conversation content item to the history.
     *
     * @param content - Content to add in our format
     */
    addHistory(content: ConversationContent): void;
    /**
     * Set entire conversation history
     *
     * Replaces the entire conversation history with new content.
     * Resets token tracking since usage context changes.
     *
     * @param history - New conversation history in our format
     */
    setHistory(history: ConversationContent[]): void;
    /**
     * Get current token usage tracking
     */
    getTokenUsage(): ITokenUsage;
    /**
     * Get token tracker instance
     */
    getTokenTracker(): ITokenTracker;
    /**
     * Check if chat is currently processing a message
     */
    isProcessing(): boolean;
    /**
     * Get current model information
     */
    getModelInfo(): {
        model: string;
        tokenLimit: number;
    };
    /**
     * Set system prompt
     *
     * Updates the system prompt used for subsequent conversations.
     *
     * @param systemPrompt - New system prompt text
     */
    setSystemPrompt(systemPrompt: string): void;
    /**
     * Get current system prompt
     *
     * @returns Current system prompt or undefined if not set
     */
    getSystemPrompt(): string | undefined;
    /**
     * Handle model fallback (e.g., pro -> flash)
     *
     * Attempts to switch to a fallback model when the current model
     * encounters issues (quota, rate limits, etc.).
     *
     * @param fallbackModel - Model name to fallback to
     * @returns True if fallback was successful
     */
    handleModelFallback(_fallbackModel: string): boolean;
    /**
     * Get usage summary for debugging
     *
     * @returns Formatted usage summary string
     */
    getUsageSummary(): string;
    /**
     * Convert our content format to Gemini's format
     *
     * @param content - Content in our format (string or ContentPart[])
     * @param role - Content role
     * @returns Gemini Content object
     */
    private convertToGeminiContent;
    /**
     * Convert Gemini content to our format
     *
     * @param geminiContent - Gemini Content object
     * @returns ConversationContent in our format
     */
    private convertFromGeminiContent;
    /**
     * Convert Gemini response to our LLMResponse format
     *
     * @param geminiResponse - Gemini GenerateContentResult
     * @param responseId - Unique response ID
     * @returns LLMResponse in our format
     */
    private convertToLLMResponse;
    /**
     * Get conversation history in Gemini format
     *
     * @param curated - Whether to return curated history
     * @returns History in Gemini's Content format
     */
    private getGeminiHistory;
    /**
     * Convert Type enum values (OBJECT, NUMBER, STRING) to lowercase for Gemini API
     */
    private convertTypesToLowercase;
    /**
     * Consolidate multiple assistant responses into one
     *
     * @param responses - Array of assistant responses
     * @returns Single consolidated response
     */
    private consolidateAssistantResponses;
}
//# sourceMappingURL=geminiChat.d.ts.map
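
Because the file above is only a type declaration, the concrete shapes of IChatConfig, ChatMessage, and LLMResponse are defined in ./interfaces.ts and are not visible here. The following is a minimal usage sketch of the declared API surface; the object-literal field names (model, apiKey, role, content) and the root import path are assumptions for illustration, not the package's confirmed contract.

// Hypothetical usage sketch for GeminiChat, based only on the declaration
// above. All object-literal field names are assumptions; check
// ./interfaces.ts in the package for the real IChatConfig, ChatMessage,
// and LLMResponse shapes. The root export path is also assumed.
import { GeminiChat } from '@continue-reasoning/agent';

async function demo(): Promise<void> {
    // Assumed IChatConfig fields -- the declaration only says the
    // constructor takes an IChatConfig.
    const chat = new GeminiChat({
        model: 'gemini-1.5-flash',
        apiKey: process.env.GEMINI_API_KEY,
    } as any);

    chat.setSystemPrompt('You are a concise assistant.');

    // sendMessageStream resolves to an AsyncGenerator<LLMResponse>,
    // so the stream is consumed with for-await.
    const stream = await chat.sendMessageStream(
        { role: 'user', content: 'Summarize the IChat interface.' } as any, // assumed ChatMessage shape
        'prompt-001',
    );

    for await (const response of stream) {
        // Each LLMResponse is one streamed chunk; its exact fields
        // are defined in interfaces.ts.
        console.log(response);
    }

    // Token usage is tracked per conversation; passing curated=true to
    // getHistory drops turns whose responses failed validation.
    console.log(chat.getUsageSummary());
    console.log('Curated turns:', chat.getHistory(true).length);
}

demo().catch(console.error);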