@continue-reasoning/mini-agent
A platform-agnostic framework for building autonomous AI agents with tool-execution capabilities
/**
* @fileoverview OpenAI Response API Chat implementation
*
* This module provides an OpenAI Chat implementation based on the Response API
* that follows the IChat interface and integrates with our Agent framework.
* It uses OpenAI's Response API for streaming responses, which provides more
* structured, event-based streaming than traditional chat completions.
*
* Key features:
* - Response API streaming for structured events
* - Event-based response handling
* - Integrated token tracking with ITokenTracker
* - Dual history system (comprehensive vs curated)
* - Platform-agnostic content types
* - Robust error handling and validation
* - Function calling support with streaming
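*
* @example
* // Minimal usage sketch; the IChatConfig fields and the message literal below
* // are illustrative assumptions, not part of this declaration file:
* const chat = new OpenAIChatResponse({ model: 'gpt-4o' } as IChatConfig);
* const message = { role: 'user', content: 'Hello' } as MessageItem;
* for await (const response of await chat.sendMessageStream(message, 'prompt-001')) {
*   console.log(response); // each LLMResponse is one structured streaming event
* }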
*/
import OpenAI from 'openai';
import { IChat, LLMResponse, ChunkItem, MessageItem, IChatConfig } from './interfaces';
import { ITokenTracker, ITokenUsage } from './interfaces';
type OpenaiMessageItem = OpenAI.Responses.ResponseInputItem;
/**
* OpenAI Response API Chat implementation using our platform-agnostic interfaces
*
* This class provides streaming chat functionality using OpenAI's Response API
* which offers event-based streaming with more structured response handling.
* It implements the IChat interface and works with our ConversationContent type
* system while interfacing with OpenAI's Response API.
*
* Key implementation details:
* - Uses OpenAI Response API for structured streaming
* - Event-based response processing
* - Converts between our types and OpenAI's types
* - Maintains conversation history in our format
* - Provides real-time token tracking
* - Supports function calling with proper streaming
*/
export declare class OpenAIChatResponse implements IChat<OpenaiMessageItem> {
private readonly chatConfig;
private history;
private tokenTracker;
private sendPromise;
private isCurrentlyProcessing;
private openai;
private logger;
private lastResponseId;
private enableCacheOptimization;
constructor(chatConfig: IChatConfig);
/**
* Send a message and get streaming response
*
* Implements the IChat interface for streaming message sending via the Response API.
* Converts our MessageItem format to OpenAI Response API format and processes
* the event-based streaming response.
*
* @param message - Message in our MessageItem format
* @param promptId - Unique identifier for this prompt
* @returns AsyncGenerator yielding LLMResponse objects
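*
* @example
* // Sketch; `message` is a MessageItem and promptId is any caller-chosen
* // unique string (both illustrative assumptions):
* const stream = await chat.sendMessageStream(message, 'prompt-001');
* for await (const response of stream) {
*   // handle each structured event; isProcessing() should report true
*   // while the stream is being consumed (assumed semantics)
* }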
*/
sendMessageStream(message: MessageItem, promptId: string): Promise<AsyncGenerator<LLMResponse>>;
/**
* Create streaming response with internal initialization using Response API
*
* This method immediately returns an AsyncGenerator and handles all initialization
* (connection, auth, retries) inside the generator. This eliminates the initial
* await delay, so streaming begins with the first event.
*/
private createStreamingResponse;
/**
* Internal stream processing for Response API events
*
* This processes the event-based streaming response from the Response API,
* handling different event types and converting them to our format.
*/
private processResponseStreamInternal;
/**
* Extract curated history (valid interactions only)
*/
private extractCuratedHistory;
/**
* Get conversation history
*
* @param curated - if true, return the curated history (valid interactions only,
* see extractCuratedHistory); otherwise the comprehensive history
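*
* @example
* // Dual-history sketch (assumed default: comprehensive when no flag is given):
* const full = chat.getHistory();
* const curated = chat.getHistory(true); // valid interactions only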
*/
getHistory(curated?: boolean): MessageItem[];
/**
* Clear conversation history
*/
clearHistory(): void;
/**
* Add content to conversation history
*/
addHistory(content: MessageItem): void;
/**
* Set entire conversation history
*/
setHistory(history: MessageItem[]): void;
/**
* Get current token usage tracking
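*
* @example
* // ITokenUsage's fields are not visible in this file, so this sketch just
* // logs the whole object alongside the debug summary:
* console.log(chat.getTokenUsage());
* console.log(chat.getUsageSummary());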
*/
getTokenUsage(): ITokenUsage;
/**
* Get token tracker instance
*/
getTokenTracker(): ITokenTracker;
/**
* Check if chat is currently processing a message
*/
isProcessing(): boolean;
/**
* Get current model information
*/
getModelInfo(): {
model: string;
tokenLimit: number;
};
/**
* Set system prompt
*/
setSystemPrompt(systemPrompt: string): void;
/**
* Get current system prompt
*/
getSystemPrompt(): string | undefined;
/**
* Handle model fallback
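*
* @example
* // Sketch; assumes the boolean return indicates the fallback was accepted,
* // and the model name is illustrative:
* if (chat.handleModelFallback('gpt-4o-mini')) {
*   // retry the failed request against the fallback model
* }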
*/
handleModelFallback(fallbackModel: string): boolean;
/**
* Get usage summary for debugging
*/
getUsageSummary(): string;
/**
* Build incremental input for cache optimization
* Filters history to include only the current user message and the previous turn's function responses
*/
private buildIncrementalInput;
/**
* Build full history input (standard approach)
*/
private buildFullHistoryInput;
/**
* Check if this is a multi-turn request (continuation turn)
*/
private isMultiTurnRequest;
/**
* Get current turn number from message
*/
private getCurrentTurnFromMessage;
/**
* Enable cache optimization feature
* This is a feature flag to control cache optimization rollout
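*
* @example
* // Feature-flag sketch: opt in to incremental inputs (see
* // buildIncrementalInput), opt out to return to full-history requests:
* chat.enableCacheOptimizationFeature();
* // ... subsequent multi-turn requests may reuse cached prefixes (assumption) ...
* chat.disableCacheOptimizationFeature();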
*/
enableCacheOptimizationFeature(): void;
/**
* Disable cache optimization feature
*/
disableCacheOptimizationFeature(): void;
/**
* Convert a message from our MessageItem format to OpenAI's Response API input format
*/
convertToProviderMessage(message: MessageItem): OpenaiMessageItem;
/**
* Convert a chunk item to a message item for history
* @param chunk Chunk to convert
* @param role Role for the resulting message
* @returns Message item for adding to history
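*
* @example
* // Sketch; `chunk` is assumed to be a ChunkItem accumulated from the stream:
* const assistantMessage = chat.convertFromChunkItems(chunk, 'assistant');
* chat.addHistory(assistantMessage);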
*/
convertFromChunkItems(chunk: ChunkItem, role: 'user' | 'assistant'): MessageItem;
}
export {};
//# sourceMappingURL=openaiChat.d.ts.map