@continue-reasoning/mini-agent
A platform-agnostic framework for building autonomous AI agents with tool execution capabilities
/**
 * @fileoverview Universal AI Agent Framework Interfaces
 *
 * This file defines platform-agnostic interfaces for AI agents that can work
 * with multiple LLM providers (Gemini, OpenAI, etc.). All interfaces are
 * designed to be implementation-independent and focus on core functionality.
 */
/**
 * Tool confirmation outcome options
 */
export var ToolConfirmationOutcome;
(function (ToolConfirmationOutcome) {
    ToolConfirmationOutcome["ProceedOnce"] = "proceed_once";
    ToolConfirmationOutcome["ProceedAlways"] = "proceed_always";
    ToolConfirmationOutcome["ProceedAlwaysServer"] = "proceed_always_server";
    ToolConfirmationOutcome["ProceedAlwaysTool"] = "proceed_always_tool";
    ToolConfirmationOutcome["ModifyWithEditor"] = "modify_with_editor";
    ToolConfirmationOutcome["Cancel"] = "cancel";
})(ToolConfirmationOutcome || (ToolConfirmationOutcome = {}));
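/**
 * Usage sketch (illustrative, not part of the public API): a scheduler might
 * switch on the confirmation outcome to decide whether a tool call may run.
 * The `allowlist` set, the `shouldExecute` helper, and the interpretation of
 * each outcome are assumptions based on the member names, not documented
 * behavior.
 *
 * @example
 * const allowlist = new Set();
 *
 * function shouldExecute(outcome, toolName) {
 *     switch (outcome) {
 *         case ToolConfirmationOutcome.ProceedOnce:
 *             return true;
 *         case ToolConfirmationOutcome.ProceedAlwaysTool:
 *             allowlist.add(toolName); // remember approval for this specific tool
 *             return true;
 *         case ToolConfirmationOutcome.ModifyWithEditor:
 *             return false; // caller should reopen the editing flow first
 *         case ToolConfirmationOutcome.Cancel:
 *         default:
 *             return false;
 *     }
 * }
 */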
/**
 * Agent event types - based on IChat LLMResponse events + tool execution events
 *
 * DESIGN PRINCIPLE: Maximize reuse of IChat's LLMResponse event stream.
 * We only add agent-specific events for tool execution and user interactions.
 *
 * Base events from LLMResponse:
 * - response.start, response.chunk.*, response.complete, response.failed, response.incomplete
 *
 * Agent-specific events:
 * - user.*: User input and cancellation events
 * - tool.call.execution.*, tool.confirmation: Tool execution lifecycle and approval
 * - turn.complete, agent.*: Agent-level events (turn completion, errors, model fallback)
 */
export var AgentEventType;
(function (AgentEventType) {
    // User interaction events
    AgentEventType["UserMessage"] = "user.message";
    AgentEventType["UserCancelled"] = "user.cancelled";
    // LLM Response events (forwarded directly from IChat's LLMResponse stream)
    AgentEventType["ResponseStart"] = "response.start";
    AgentEventType["ResponseChunkTextDelta"] = "response.chunk.text.delta";
    AgentEventType["ResponseChunkTextDone"] = "response.chunk.text.done";
    AgentEventType["ResponseChunkThinkingDelta"] = "response.chunk.thinking.delta";
    AgentEventType["ResponseChunkThinkingDone"] = "response.chunk.thinking.done";
    AgentEventType["ResponseChunkFunctionCallDelta"] = "response.chunk.function_call.delta";
    AgentEventType["ResponseChunkFunctionCallDone"] = "response.chunk.function_call.done";
    AgentEventType["ResponseComplete"] = "response.complete";
    AgentEventType["ResponseIncomplete"] = "response.incomplete";
    AgentEventType["ResponseFailed"] = "response.failed";
    // Tool execution events (Agent-specific)
    AgentEventType["ToolExecutionStart"] = "tool.call.execution.start";
    AgentEventType["ToolExecutionDone"] = "tool.call.execution.done";
    AgentEventType["ToolConfirmation"] = "tool.confirmation";
    // Agent-level events
    AgentEventType["TurnComplete"] = "turn.complete";
    AgentEventType["Error"] = "agent.error";
    AgentEventType["ModelFallback"] = "agent.model_fallback";
})(AgentEventType || (AgentEventType = {}));
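/**
 * Usage sketch: a UI layer might subscribe to the agent's event stream and
 * branch on AgentEventType. The `agent.onEvent(handler)` shape is an
 * assumption suggested by the `onEvent` member checked in `isAgent`, and the
 * field names on `event.data` are illustrative only.
 *
 * @example
 * let text = '';
 * agent.onEvent((event) => {
 *     switch (event.type) {
 *         case AgentEventType.ResponseChunkTextDelta:
 *             text += event.data?.delta ?? ''; // accumulate streamed text
 *             break;
 *         case AgentEventType.ToolExecutionStart:
 *             console.log('tool execution started', event.data);
 *             break;
 *         case AgentEventType.Error:
 *             console.error('agent error', event.data);
 *             break;
 *     }
 * });
 */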
/**
 * Utility function to create an AgentEvent from an LLMResponse
 *
 * This is the core function that maps LLMResponse events to AgentEvents,
 * maintaining event stream consistency between the IChat and IAgent layers.
 */
export function createAgentEventFromLLMResponse(llmResponse, sessionId, turn) {
    // Map LLMResponse type to AgentEventType
    let agentEventType;
    switch (llmResponse.type) {
        case 'response.start':
            agentEventType = AgentEventType.ResponseStart;
            break;
        case 'response.chunk.text.delta':
            agentEventType = AgentEventType.ResponseChunkTextDelta;
            break;
        case 'response.chunk.text.done':
            agentEventType = AgentEventType.ResponseChunkTextDone;
            break;
        case 'response.chunk.thinking.delta':
            agentEventType = AgentEventType.ResponseChunkThinkingDelta;
            break;
        case 'response.chunk.thinking.done':
            agentEventType = AgentEventType.ResponseChunkThinkingDone;
            break;
        case 'response.chunk.function_call.delta':
            agentEventType = AgentEventType.ResponseChunkFunctionCallDelta;
            break;
        case 'response.chunk.function_call.done':
            agentEventType = AgentEventType.ResponseChunkFunctionCallDone;
            break;
        case 'response.complete':
            agentEventType = AgentEventType.ResponseComplete;
            break;
        case 'response.incomplete':
            agentEventType = AgentEventType.ResponseIncomplete;
            break;
        case 'response.failed':
            agentEventType = AgentEventType.ResponseFailed;
            break;
        default:
            // Unknown LLM event types fall back to ResponseComplete; the original
            // type is still preserved in metadata.originalType below.
            agentEventType = AgentEventType.ResponseComplete;
    }
    return {
        type: agentEventType,
        data: llmResponse,
        timestamp: Date.now(),
        sessionId,
        turn,
        metadata: {
            source: 'llm_response',
            originalType: llmResponse.type,
        },
    };
}
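/**
 * Usage sketch: bridging a chat response stream into agent events. Only
 * `createAgentEventFromLLMResponse` comes from this module; the assumption
 * that `chat.sendMessageStream(...)` yields LLMResponse objects as an async
 * iterable is suggested by the member checked in `isChat`, and `emit` is a
 * hypothetical event-bus callback.
 *
 * @example
 * async function forwardResponses(chat, emit, sessionId, turn) {
 *     for await (const llmResponse of chat.sendMessageStream('Hello')) {
 *         const agentEvent = createAgentEventFromLLMResponse(llmResponse, sessionId, turn);
 *         emit(agentEvent); // e.g. push onto the agent's event stream
 *     }
 * }
 */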
/**
 * Tool call execution states
 */
export var ToolCallStatus;
(function (ToolCallStatus) {
    ToolCallStatus["Validating"] = "validating";
    ToolCallStatus["Scheduled"] = "scheduled";
    ToolCallStatus["Executing"] = "executing";
    ToolCallStatus["Success"] = "success";
    ToolCallStatus["Error"] = "error";
    ToolCallStatus["Cancelled"] = "cancelled";
    ToolCallStatus["AwaitingApproval"] = "awaiting_approval";
})(ToolCallStatus || (ToolCallStatus = {}));
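/**
 * Usage sketch: a helper that treats Success, Error, and Cancelled as terminal
 * states when polling scheduled tool calls. The `toolCall.status` shape and
 * the choice of terminal states are assumptions for illustration.
 *
 * @example
 * const TERMINAL_STATUSES = new Set([
 *     ToolCallStatus.Success,
 *     ToolCallStatus.Error,
 *     ToolCallStatus.Cancelled,
 * ]);
 *
 * function isFinished(toolCall) {
 *     return TERMINAL_STATUSES.has(toolCall.status);
 * }
 */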
/**
 * Type guard for IAgent
 */
export function isAgent(obj) {
    return (typeof obj === 'object' &&
        obj !== null &&
        'process' in obj &&
        'getChat' in obj &&
        'getToolScheduler' in obj &&
        'getTokenUsage' in obj &&
        'clearHistory' in obj &&
        'setSystemPrompt' in obj &&
        'getSystemPrompt' in obj &&
        'getStatus' in obj &&
        'onEvent' in obj &&
        'offEvent' in obj);
}
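/**
 * Usage sketch: the type guards in this file are structural ("duck typing")
 * checks, so they can narrow values received across module or plugin
 * boundaries. `loadPlugin()` is a hypothetical source of an untyped value;
 * note the guard only verifies that the IAgent members are present, not their
 * signatures.
 *
 * @example
 * const candidate = loadPlugin();
 * if (isAgent(candidate)) {
 *     console.log(candidate.getStatus());
 * }
 */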
/**
 * Type guard for IChat
 */
export function isChat(obj) {
    return (typeof obj === 'object' &&
        obj !== null &&
        'sendMessageStream' in obj &&
        'getHistory' in obj &&
        'clearHistory' in obj &&
        'addHistory' in obj &&
        'setHistory' in obj &&
        'setSystemPrompt' in obj &&
        'getSystemPrompt' in obj &&
        'getTokenUsage' in obj &&
        'getTokenTracker' in obj &&
        'isProcessing' in obj &&
        'getModelInfo' in obj &&
        'handleModelFallback' in obj);
}
/**
 * Type guard for ITool
 */
export function isTool(obj) {
    return (typeof obj === 'object' &&
        obj !== null &&
        'name' in obj &&
        'description' in obj &&
        'schema' in obj &&
        'isOutputMarkdown' in obj &&
        'canUpdateOutput' in obj &&
        'validateToolParams' in obj &&
        'getDescription' in obj &&
        'shouldConfirmExecute' in obj &&
        'execute' in obj);
}
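/**
 * Usage sketch: filtering an arbitrary collection down to usable tools before
 * registration. `candidates` and `registry.register` are hypothetical names;
 * only `isTool` and the `name` member it checks come from this module.
 *
 * @example
 * for (const candidate of candidates) {
 *     if (isTool(candidate)) {
 *         registry.register(candidate.name, candidate);
 *     }
 * }
 */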
//# sourceMappingURL=interfaces.js.map