erosolar-cli
Version:
Unified AI agent framework for the command line - Multi-provider support with schema-driven tools, code intelligence, and transparent reasoning
364 lines • 12.4 kB
JavaScript
import OpenAI from 'openai';
// ============================================================================
// Error Recovery Constants
// ============================================================================
// Lowercase substrings matched (case-insensitively) against an error's
// message, name, and code by isRecoverableError(). A hit marks the error
// as transient, so executeWithRetry() will retry the request.
const RECOVERABLE_ERROR_PATTERNS = [
    // Truncated / malformed response bodies
    'premature close',
    'premature end',
    'unexpected end',
    'aborted',
    'fetcherror',
    'invalid response body',
    // Decompression failures (usually a partially-received payload)
    'gunzip',
    'decompress',
    // Socket / network-level failures
    'econnreset',
    'econnrefused',
    'epipe',
    'socket hang up',
    'network',
    'timeout',
    // Throttling and transient HTTP status codes
    'rate limit',
    '429',
    '500',
    '502',
    '503',
    '504',
];
/**
 * Decide whether an error looks transient enough to retry.
 * Concatenates the error's message, name, and code, lowercases the result,
 * and scans it for any of RECOVERABLE_ERROR_PATTERNS.
 * Non-Error values are never considered recoverable.
 */
function isRecoverableError(error) {
    if (!(error instanceof Error)) {
        return false;
    }
    const haystack = [error.message, error.name ?? '', error.code ?? '']
        .join(' ')
        .toLowerCase();
    return RECOVERABLE_ERROR_PATTERNS.some((needle) => haystack.includes(needle));
}
/**
 * Check if a model supports reasoning parameters.
 * The reasoning-effort parameter is only accepted by the GPT-5 family
 * (gpt-5, gpt-5.1, ...), detected by a case-insensitive substring match.
 */
function supportsReasoningParam(model) {
    return model.toLowerCase().includes('gpt-5');
}
/**
 * Provider backed by the OpenAI Responses API.
 * Supports non-streaming generation (with retry on transient errors) and
 * streaming generation with incremental text / tool-call events.
 */
export class OpenAIResponsesProvider {
    // Provider identifier used in log prefixes; defaults to 'openai'.
    id;
    // Model name sent with every request.
    model;
    // OpenAI SDK client; SDK-level retries are disabled in favor of executeWithRetry().
    client;
    // Reasoning effort; only populated for models that accept it (GPT-5 family).
    reasoningEffort;
    // Optional verbosity hint forwarded as `text.verbosity`.
    textVerbosity;
    // Maximum number of retries performed by executeWithRetry (default 3).
    maxRetries;
    /**
     * @param options - requires apiKey and model; optional: timeout (ms,
     *   default 120000), baseURL, providerId, reasoningEffort,
     *   textVerbosity, maxRetries (default 3).
     */
    constructor(options) {
        const clientConfig = {
            apiKey: options.apiKey,
            timeout: options.timeout ?? 120000,
            maxRetries: 0, // We handle retries ourselves
        };
        if (options.baseURL) {
            clientConfig.baseURL = options.baseURL;
        }
        this.client = new OpenAI(clientConfig);
        this.id = options.providerId ?? 'openai';
        this.model = options.model;
        // Only set reasoningEffort for models that support it
        this.reasoningEffort = supportsReasoningParam(options.model) ? options.reasoningEffort : undefined;
        this.textVerbosity = options.textVerbosity;
        this.maxRetries = options.maxRetries ?? 3;
    }
    /**
     * Sleep for a given number of milliseconds.
     */
    sleep(ms) {
        return new Promise(resolve => setTimeout(resolve, ms));
    }
    /**
     * Calculate exponential backoff delay with jitter.
     * Doubles per attempt from baseDelay, capped at maxDelay, with up to
     * 10% random jitter added on top.
     */
    getBackoffDelay(attempt, baseDelay = 1000, maxDelay = 30000) {
        const delay = Math.min(baseDelay * Math.pow(2, attempt), maxDelay);
        return delay + Math.random() * delay * 0.1;
    }
    /**
     * Execute request with retry logic for transient errors.
     * Retries up to this.maxRetries times when isRecoverableError() matches;
     * non-recoverable errors (and the final failed attempt) are rethrown.
     */
    async executeWithRetry(operation, operationName) {
        let lastError;
        for (let attempt = 0; attempt <= this.maxRetries; attempt++) {
            try {
                return await operation();
            }
            catch (error) {
                lastError = error instanceof Error ? error : new Error(String(error));
                if (isRecoverableError(error) && attempt < this.maxRetries) {
                    const delay = this.getBackoffDelay(attempt);
                    console.warn(`[${this.id}] ${operationName} failed (attempt ${attempt + 1}/${this.maxRetries + 1}): ` +
                        `${lastError.message}. Retrying in ${Math.round(delay)}ms...`);
                    await this.sleep(delay);
                    continue;
                }
                throw error;
            }
        }
        // Unreachable in practice (the loop either returns or throws); kept as a safety net.
        throw lastError;
    }
    /**
     * Non-streaming completion. Returns a 'tool_calls' result when the model
     * emitted function calls, otherwise a 'message' result; both carry the
     * extracted text content and (when reported) token usage.
     */
    async generate(messages, tools) {
        return this.executeWithRetry(async () => {
            const request = {
                model: this.model,
                input: mapMessages(messages),
                tools: tools.length ? tools.map(mapTool) : undefined,
                // Enable parallel tool calls for better efficiency
                parallel_tool_calls: tools.length ? true : undefined,
                stream: false,
            };
            if (this.reasoningEffort) {
                request.reasoning = { effort: this.reasoningEffort };
            }
            if (this.textVerbosity) {
                request.text = { verbosity: this.textVerbosity };
            }
            const response = await this.client.responses.create(request);
            assertHasOutput(response);
            // Safely extract tool calls with error recovery
            let toolCalls = [];
            try {
                toolCalls = response.output.filter(isFunctionCall).map(mapToolCall);
            }
            catch (parseError) {
                // Degrade to a plain message rather than failing the whole turn.
                console.warn(`[${this.id}] Failed to parse tool calls, recovering: ` +
                    `${parseError instanceof Error ? parseError.message : String(parseError)}`);
            }
            const usage = mapUsage(response.usage);
            if (toolCalls.length > 0) {
                return {
                    type: 'tool_calls',
                    toolCalls,
                    content: extractTextContent(response),
                    usage,
                };
            }
            return {
                type: 'message',
                content: extractTextContent(response),
                usage,
            };
        }, 'generate');
    }
    /**
     * Streaming completion. Yields incremental events:
     *   { type: 'content', content }    - text deltas as they arrive
     *   { type: 'tool_call', toolCall } - each completed function call
     *   { type: 'usage', usage }        - token usage, when reported
     *   { type: 'done' }                - end of the response
     * NOTE(review): unlike generate(), this path is NOT wrapped in
     * executeWithRetry — a mid-stream failure is not retried here.
     */
    async *generateStream(messages, tools) {
        const request = {
            model: this.model,
            input: mapMessages(messages),
            tools: tools.length ? tools.map(mapTool) : undefined,
            // Enable parallel tool calls for better efficiency
            parallel_tool_calls: tools.length ? true : undefined,
            stream: true,
        };
        if (this.reasoningEffort) {
            request.reasoning = { effort: this.reasoningEffort };
        }
        if (this.textVerbosity) {
            request.text = { verbosity: this.textVerbosity };
        }
        const stream = await this.client.responses.create(request);
        // Track function calls by item_id -> { call_id, name, arguments }
        const functionCallsById = new Map();
        for await (const event of stream) {
            switch (event.type) {
                case 'response.output_text.delta':
                    if (event.delta) {
                        yield { type: 'content', content: event.delta };
                    }
                    break;
                // Capture function call metadata when it's added
                case 'response.output_item.added': {
                    const item = event.item;
                    if (item?.type === 'function_call' && item.id) {
                        functionCallsById.set(item.id, {
                            callId: item.call_id ?? item.id,
                            name: item.name ?? '',
                            arguments: '',
                        });
                    }
                    break;
                }
                // Accumulate the JSON argument string as it streams in
                case 'response.function_call_arguments.delta': {
                    const itemId = event.item_id;
                    if (itemId) {
                        const existing = functionCallsById.get(itemId);
                        if (existing) {
                            existing.arguments += event.delta ?? '';
                        }
                    }
                    break;
                }
                // Arguments complete: parse them and emit the finished tool call
                case 'response.function_call_arguments.done': {
                    const itemId = event.item_id;
                    if (itemId) {
                        const pending = functionCallsById.get(itemId);
                        if (pending) {
                            let parsed = {};
                            try {
                                parsed = JSON.parse(pending.arguments || '{}');
                            }
                            catch {
                                // Malformed JSON from the model degrades to empty arguments.
                                parsed = {};
                            }
                            yield {
                                type: 'tool_call',
                                toolCall: {
                                    id: pending.callId,
                                    name: pending.name,
                                    arguments: parsed,
                                },
                            };
                            functionCallsById.delete(itemId);
                        }
                    }
                    break;
                }
                // 'response.completed' is the terminal event; 'response.done' is
                // presumably handled as a legacy/alias name — TODO confirm against SDK.
                case 'response.done':
                case 'response.completed':
                    if (event.response?.usage) {
                        const usage = mapUsage(event.response.usage);
                        if (usage) {
                            yield { type: 'usage', usage };
                        }
                    }
                    yield { type: 'done' };
                    break;
            }
        }
    }
}
/**
 * Convert framework chat messages into Responses API input items.
 * system/user/assistant messages become 'message' items; assistant tool
 * calls are appended as 'function_call' items; tool results become
 * 'function_call_output' items. Unknown roles are silently dropped.
 */
function mapMessages(messages) {
    const input = [];
    for (const message of messages) {
        const { role } = message;
        if (role === 'system' || role === 'user' || role === 'assistant') {
            input.push({
                role,
                content: message.content,
                type: 'message',
            });
            if (role === 'assistant') {
                // Replay any tool calls the assistant made alongside its text.
                for (const call of message.toolCalls ?? []) {
                    input.push({
                        type: 'function_call',
                        call_id: call.id,
                        name: call.name,
                        arguments: JSON.stringify(call.arguments ?? {}),
                    });
                }
            }
        }
        else if (role === 'tool') {
            input.push({
                type: 'function_call_output',
                call_id: message.toolCallId,
                output: message.content,
            });
        }
        // Any other role is ignored.
    }
    return input;
}
/**
 * Convert a framework tool definition into a Responses API function tool.
 * Missing parameter schemas default to an empty object schema; strict
 * schema validation is disabled.
 */
function mapTool(definition) {
    const emptySchema = { type: 'object', properties: {} };
    return {
        type: 'function',
        name: definition.name,
        description: definition.description,
        parameters: definition.parameters ?? emptySchema,
        strict: false,
    };
}
/**
 * Extract the best available text from a Responses API response.
 * Preference order: text assembled from output blocks, then the SDK's
 * pre-aggregated `output_text`, then any refusal text; '' if none.
 */
function extractTextContent(response) {
    const fromBlocks = collectOutputText(response.output);
    if (fromBlocks) {
        return fromBlocks;
    }
    if (typeof response.output_text === 'string') {
        const aggregated = response.output_text.trim();
        if (aggregated) {
            return aggregated;
        }
    }
    // collectRefusalText already returns '' when nothing matches.
    return collectRefusalText(response.output);
}
/**
 * Gather all 'output_text' blocks from message items, joined with
 * newlines and trimmed. Returns '' when no text blocks are present.
 */
function collectOutputText(output) {
    const texts = output
        .filter(isOutputMessage)
        .flatMap((item) => item.content)
        .filter((block) => block.type === 'output_text')
        .map((block) => block.text);
    return texts.join('\n').trim();
}
/**
 * Return the first non-empty refusal text found in any message item,
 * trimmed; '' when the model did not refuse.
 */
function collectRefusalText(output) {
    for (const item of output) {
        if (!isOutputMessage(item)) {
            continue;
        }
        const hit = item.content.find((block) => isRefusal(block) && block.refusal?.trim());
        if (hit) {
            return hit.refusal.trim();
        }
    }
    return '';
}
/**
 * Convert an API function-call item into the framework's tool-call shape.
 * The JSON argument string is parsed defensively: malformed JSON from the
 * model degrades to empty arguments instead of throwing.
 */
function mapToolCall(call) {
    const rawArguments = call.arguments ?? '{}';
    let args = {};
    try {
        args = JSON.parse(rawArguments);
    }
    catch {
        args = {};
    }
    return {
        id: call.call_id ?? call.id ?? '',
        name: call.name,
        arguments: args,
    };
}
/**
 * Translate the API's snake_case token-usage object into the framework's
 * camelCase shape. Returns undefined when no usage was reported.
 */
function mapUsage(usage) {
    if (!usage) {
        return undefined;
    }
    const { input_tokens, output_tokens, total_tokens } = usage;
    return {
        inputTokens: input_tokens,
        outputTokens: output_tokens,
        totalTokens: total_tokens,
    };
}
/** Type guard: true for output items representing a function call. */
function isFunctionCall(item) {
    const { type } = item;
    return type === 'function_call';
}
/** Type guard: true for output items representing an assistant message. */
function isOutputMessage(item) {
    const { type } = item;
    return type === 'message';
}
/** Type guard: true for content blocks carrying a model refusal. */
function isRefusal(block) {
    const { type } = block;
    return type === 'refusal';
}
/**
 * Ensure the response is a non-streaming payload (i.e. it exposes an
 * `output` property); throws otherwise.
 */
function assertHasOutput(response) {
    if ('output' in response) {
        return;
    }
    throw new Error('Streaming responses are not supported in this runtime.');
}
//# sourceMappingURL=openaiResponsesProvider.js.map