@continue-reasoning/agent
A platform-agnostic framework for building autonomous AI agents with tool execution capabilities
/**
* @fileoverview BaseAgent Implementation
*
* This file provides the BaseAgent class that connects all the interfaces
* and implements the core agent workflow. It coordinates between IChat,
* IToolScheduler, and AgentEvent system to provide a complete agent experience.
*/
import { IAgent, IAgentConfig, IAgentStatus, IChat, IToolScheduler, ITokenUsage, AgentEvent, EventHandler, ChatMessage, ITool } from './interfaces.js';
import { ILogger } from './logger.js';
/**
* BaseAgent implementation that connects all core interfaces
*
* This class provides the main agent functionality by coordinating:
* - IChat: For conversation management and streaming responses
* - IToolScheduler: For tool execution and management
* - AgentEvent: For event emission and monitoring
*
* The agent follows this workflow:
* 1. Receive user input
* 2. Send to chat for LLM processing
* 3. Extract tool calls from response
* 4. Execute tools via scheduler
* 5. Integrate results back into conversation
* 6. Emit events throughout the process
*
* Key features:
* - Streaming-first approach for real-time responses
* - Comprehensive event emission for monitoring
* - Automatic tool call extraction and execution
* - Proper error handling and state management
* - Guards against overlapping runs, with AbortSignal support for cancellation
*
* @example
* ```typescript
* // BaseAgent is abstract, so instantiate a concrete subclass of it.
* class MyAgent extends BaseAgent {}
* const agent = new MyAgent(config, chat, toolScheduler);
* agent.onEvent('logger', (event) => console.log(event));
*
* const abortController = new AbortController();
* for await (const event of agent.process('Hello', 'session-1', abortController.signal)) {
* console.log(event);
* }
* ```
*/
export declare abstract class BaseAgent implements IAgent {
protected agentConfig: IAgentConfig;
protected chat: IChat;
protected toolScheduler: IToolScheduler;
/** Map of event handler IDs to their handler functions */
private eventHandlers;
/** Current conversation turn number, incremented for each user input */
private currentTurn;
/** Flag indicating if the agent is currently processing a request */
private isRunning;
/** Timestamp of the last status update */
private lastUpdateTime;
/** Logger instance for this agent */
protected logger: ILogger;
/**
* Constructor for BaseAgent
*
* @param config - Agent configuration including model, working directory, etc.
* @param chat - Chat instance for conversation management
* @param toolScheduler - Tool scheduler for executing tool calls
*/
constructor(agentConfig: IAgentConfig, chat: IChat, toolScheduler: IToolScheduler);
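/**
* Register a tool with the agent so the tool scheduler can execute it.
*
* A minimal usage sketch; `myTool` and the 'read_file' tool name are hypothetical,
* and the exact ITool shape is defined in interfaces.js:
*
* @example
* ```typescript
* agent.registerTool(myTool);
* console.log(agent.getToolList().length);
* if (agent.getTool('read_file')) {
*   agent.removeTool('read_file');
* }
* ```
*/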
registerTool(tool: ITool): void;
/** Remove a registered tool by name */
removeTool(toolName: string): boolean;
/** Get the list of currently registered tools */
getToolList(): ITool[];
/** Look up a registered tool by name */
getTool(toolName: string): ITool | undefined;
/**
* Set up internal event handlers
*/
private setupEventHandlers;
/**
* Main processing method - handles complete conversation flow
*
* This is the primary entry point for processing user input. It orchestrates
* the entire conversation flow including:
* - User input processing
* - LLM response generation (streaming)
* - Tool call extraction and execution
* - Result integration
* - Event emission
*
* The method tracks whether a run is already in progress and respects abort
* signals for graceful cancellation.
*
* @param userInput - The user's input text
* @param sessionId - Unique identifier for this conversation session
* @param abortSignal - Signal to abort the processing if needed
* @returns AsyncGenerator that yields AgentEvent objects
*
* @example
* ```typescript
* const abortController = new AbortController();
* for await (const event of agent.process('Hello', 'session-1', abortController.signal)) {
* if (event.type === AgentEventType.AssistantMessage) {
* console.log(event.data);
* }
* }
* ```
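*
* Cancellation goes through the standard AbortController. For instance, to stop
* processing after a timeout (the 30-second value below is arbitrary):
*
* @example
* ```typescript
* const abortController = new AbortController();
* const timer = setTimeout(() => abortController.abort(), 30_000);
* try {
*   for await (const event of agent.process('Summarize the repo', 'session-1', abortController.signal)) {
*     console.log(event.type);
*   }
* } finally {
*   clearTimeout(timer);
* }
* ```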
*/
process(userInput: string, sessionId: string, abortSignal: AbortSignal): AsyncGenerator<AgentEvent>;
/**
* Process one turn of conversation
*
* This method processes a single turn of conversation, handling:
* - LLM response generation (streaming)
* - Tool call extraction and execution
* - Event emission
*
* @param sessionId - Unique identifier for this conversation session
* @param chatMessage - The chat message to process
* @param abortSignal - Signal to abort the processing if needed
* @returns AsyncGenerator that yields AgentEvent objects
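*
* A sketch of driving a single turn directly. The ChatMessage literal below is an
* assumption about its shape (see interfaces.js for the actual definition), hence
* the cast:
*
* @example
* ```typescript
* const controller = new AbortController();
* const message = { role: 'user', content: 'List the files in src/' } as unknown as ChatMessage;
* for await (const event of agent.processOneTurn('session-1', message, controller.signal)) {
*   console.log(event.type);
* }
* ```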
*/
processOneTurn(sessionId: string, chatMessage: ChatMessage, abortSignal: AbortSignal): AsyncGenerator<AgentEvent>;
/**
* Execute tools and handle results
*
* This method handles the complete tool execution lifecycle:
* 1. Emit tool call request events
* 2. Schedule tools for execution via the tool scheduler
* 3. Wait for tool completion
* 4. Process and emit tool results
* 5. Integrate results back into conversation
*
* The method properly handles errors, timeouts, and abort signals.
*
* @param toolCalls - Array of tool call requests to execute
* @param sessionId - Current session identifier
* @param abortSignal - Signal to abort tool execution
* @returns AsyncGenerator yielding tool-related events
*
* @private
*/
private executeTools;
/**
* Wait for completion of current turn's tool calls only
*
* This method waits for the specific tool calls from the current turn to complete,
* filtering out any previously completed tool calls from other turns.
*
* @param currentToolCalls - The tool calls from the current turn
* @param abortSignal - Signal to abort waiting
* @returns Promise resolving to array of completed tool calls from current turn only
*
* @private
*/
private waitForCurrentToolCompletion;
/**
* Create user content from input
*
* Converts user input string into a ConversationContent object
* with proper metadata and formatting.
*
* @param userInput - The user's input text
* @param sessionId - Current session identifier
* @returns ConversationContent object representing the user input
*
* @private
*/
private createUserContent;
/**
* Extract content from LLM response chunk
*
* Extracts text content from a streaming LLM response chunk.
* Filters for text-type content parts and concatenates their text.
*
* @param chunk - LLM response chunk
* @returns Extracted text content or null if no text found
*
* @private
*/
private extractContentFromChunk;
/**
* Extract tool calls from LLM response chunk
*
* Scans a streaming LLM response chunk for function call parts
* and converts them to tool call request format.
*
* @param chunk - LLM response chunk to scan
* @returns Array of tool call request info objects
*
* @private
*/
private extractToolCallsFromChunk;
/**
* Convert tool result to content parts
*/
private convertToolResultToContent;
/**
* Convert completed tool calls to conversation content
*/
private convertToolCallsToContent;
/**
* Convert tool call responses to a single chat message for the next turn.
* This method aggregates all tool call responses into a single message.
*
* @param toolCallResponses - Array of ToolCallResponse events from the current turn.
* @returns A single ChatMessage object containing all tool call results.
*/
private convertToolCallResponsesToChatMessage;
/**
* Register event handler
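*
* Handlers are keyed by id so they can later be removed with offEvent. A small
* sketch (the 'metrics' id is just an example name):
*
* @example
* ```typescript
* agent.onEvent('metrics', (event) => {
*   console.log(event.type, event);
* });
* // ...later
* agent.offEvent('metrics');
* ```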
*/
onEvent(id: string, handler: EventHandler): void;
/**
* Remove event handler
*/
offEvent(id: string): void;
/**
* Create and emit event
*/
private createEvent;
/**
* Create error event
*/
private createErrorEvent;
/**
* Emit event to all handlers
*/
private emitEvent;
/**
* Get the underlying chat instance
*/
getChat(): IChat;
/**
* Get the tool scheduler instance
*/
getToolScheduler(): IToolScheduler;
/**
* Get current token usage
*/
getTokenUsage(): ITokenUsage;
/**
* Clear conversation history
*/
clearHistory(): void;
/**
* Set system prompt
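*
* For example (the prompt text is arbitrary):
*
* @example
* ```typescript
* agent.setSystemPrompt('You are a concise coding assistant.');
* console.log(agent.getSystemPrompt());
* ```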
*/
setSystemPrompt(systemPrompt: string): void;
/**
* Get current system prompt
*/
getSystemPrompt(): string | undefined;
/**
* Get current agent status
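*
* A small monitoring sketch; the fields of IAgentStatus and ITokenUsage are
* defined in interfaces.js and not enumerated here:
*
* @example
* ```typescript
* console.log({ status: agent.getStatus(), usage: agent.getTokenUsage() });
* ```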
*/
getStatus(): IAgentStatus;
/**
* Generate unique prompt ID
*/
private generatePromptId;
/**
* Wait for specified time
*/
private wait;
}
//# sourceMappingURL=baseAgent.d.ts.map