@unified-llm/core

Unified LLM interface (in-memory).
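
A minimal usage sketch of the provider defined below. The import path is assumed from the package name and is not confirmed by this file alone; the constructor options, `chat()` result shape, and `stream()` event shapes are taken from the source underneath.

// Hypothetical entry-point import; this file only defines the class.
import { OpenAIAgentProvider } from '@unified-llm/core';

const provider = new OpenAIAgentProvider({
  apiKey: process.env.OPENAI_API_KEY,
  model: 'gpt-4o-mini',
});

// Non-streaming: returns a unified response with top-level `text`.
const response = await provider.chat({
  messages: [
    { role: 'system', content: 'You are terse.' },
    { role: 'user', content: 'Say hello.' },
  ],
});
console.log(response.text);

// Streaming: yields 'start', 'text_delta', and 'stop' events.
for await (const event of provider.stream({
  messages: [{ role: 'user', content: 'Stream a short haiku.' }],
})) {
  if (event.eventType === 'text_delta') process.stdout.write(event.delta.text);
}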

import OpenAI from 'openai';
import {
  Agent,
  run,
  user,
  assistant,
  tool as agentsTool,
  MCPServerStdio,
  MCPServerSSE,
  MCPServerStreamableHttp,
  OpenAIResponsesModel,
  OpenAIChatCompletionsModel,
} from '@openai/agents';
import { validateChatRequest } from '../../utils/validation.js';
import { validateOpenAILogLevel } from '../../validators/index.js';
import BaseProvider from '../base-provider.js';
import { normalizeToolParametersForAgents } from '../../utils/tool-schema.js';

export class OpenAIAgentProvider extends BaseProvider {
  constructor({ apiKey, client, model, tools = [], mcpServers, openaiApi = 'responses', logLevel = 'warn' }) {
    super({ model, tools });
    // Validate log level; the agents SDK picks up OPENAI_LOG from env as well
    const validatedLogLevel = validateOpenAILogLevel(logLevel);
    if (validatedLogLevel) {
      process.env.OPENAI_LOG = validatedLogLevel;
    }
    // Do not touch the global client/key; inject a per-agent model instead
    this.openaiClient = client ?? (apiKey ? new OpenAI({ apiKey }) : undefined);
    this.mcpServerConfigs = mcpServers;
    this.providerTools = tools ?? [];
    this.openaiApi = openaiApi;
  }

  // --- Public API -----------------------------------------------------------

  async chat(request) {
    validateChatRequest(request);
    const { agent, cleanup, effectiveModel } = await this.buildEphemeralAgent(request);
    const inputs = this.toAgentInputs(request.messages);
    const ac = new AbortController();
    try {
      const result = await run(agent, inputs, { signal: ac.signal });
      const unified = this.convertAgentResultToUnified(result);
      if (!unified.model) {
        unified.model = effectiveModel;
      }
      return unified;
    } catch (error) {
      throw this.handleError(error);
    } finally {
      try { ac.abort(); } catch (e) { void e; }
      await cleanup();
    }
  }

  /**
   * Streaming: emits chunk responses as they arrive, then a final, fully-unified message.
   * Yields a 'start' event, one 'text_delta' event per chunk, then a 'stop' event.
   */
  async *stream(request) {
    validateChatRequest(request);
    const { agent, cleanup, effectiveModel } = await this.buildEphemeralAgent(request);
    const inputs = this.toAgentInputs(request.messages);
    const ac = new AbortController();
    try {
      const streamed = await run(agent, inputs, { stream: true, signal: ac.signal });
      const textStream = streamed.toTextStream({ compatibleWithNodeStreams: true });
      let fullText = '';
      // Start event
      yield {
        id: this.generateMessageId(),
        model: effectiveModel,
        provider: 'openai',
        message: { id: this.generateMessageId(), role: 'assistant', content: [], createdAt: new Date() },
        text: '',
        createdAt: new Date(),
        rawResponse: streamed,
        eventType: 'start',
        outputIndex: 0,
      };
      try {
        for await (const chunk of textStream) {
          const textChunk = typeof chunk === 'string' ? chunk : chunk?.toString?.('utf8') ?? '';
          if (!textChunk) continue;
          fullText += textChunk;
          yield {
            id: this.generateMessageId(),
            model: effectiveModel,
            provider: 'openai',
            message: {
              id: this.generateMessageId(),
              role: 'assistant',
              content: [{ type: 'text', text: textChunk }],
              createdAt: new Date(),
            },
            text: fullText,
            createdAt: new Date(),
            rawResponse: streamed,
            eventType: 'text_delta',
            outputIndex: 0,
            delta: { type: 'text', text: textChunk },
          };
        }
      } finally {
        try {
          if (typeof textStream?.destroy === 'function') {
            textStream.destroy();
          }
        } catch (e) { void e; }
      }
      const completed = await streamed.completed;
      // Stop event
      yield {
        id: this.generateMessageId(),
        model: effectiveModel,
        provider: 'openai',
        message: {
          id: this.generateMessageId(),
          role: 'assistant',
          content: fullText ? [{ type: 'text', text: fullText }] : [],
          createdAt: new Date(),
        },
        text: fullText,
        createdAt: new Date(),
        rawResponse: completed,
        eventType: 'stop',
        outputIndex: 0,
      };
    } finally {
      try { ac.abort(); } catch (e) { void e; }
      await cleanup();
    }
  }

  // --- Agent setup / lifecycle ----------------------------------------------

  async buildEphemeralAgent(request) {
    if (!request.model && !this.model) {
      throw new Error('Model must be specified either in provider or request');
    }
    const servers = await this.initMCPServers();
    // Fold system messages into the agent's instructions
    const systemText = this.collectSystemText(request.messages) || 'You are a helpful assistant.';
    const effectiveModel = request.model ?? this.model ?? 'gpt-4o-mini';
    // Build a model instance with the injected client when available (no global state)
    let modelInstance;
    if (this.openaiClient) {
      const client = this.openaiClient;
      modelInstance = this.openaiApi === 'responses'
        ? new OpenAIResponsesModel(client, effectiveModel)
        : new OpenAIChatCompletionsModel(client, effectiveModel);
    } else {
      // Explicitly use the default client resolution path when no client is provided
      modelInstance = effectiveModel;
    }
    // Safety check: a client was provided but we still ended up with a string model
    if (this.openaiClient && typeof modelInstance === 'string') {
      throw new Error('Failed to construct OpenAI model with injected client. Ensure @openai/agents exports the model classes.');
    }
    const agent = new Agent({
      name: 'Assistant',
      instructions: systemText,
      mcpServers: servers.length ? servers : undefined,
      model: modelInstance,
      tools: this.adaptFunctionTools(this.providerTools),
    });
    const cleanup = async () => {
      await this.closeMCPServers(servers);
    };
    return { agent, servers, cleanup, effectiveModel };
  }

  async initMCPServers() {
    const servers = [];
    if (!this.mcpServerConfigs?.length) return servers;
    for (const config of this.mcpServerConfigs) {
      let server;
      try {
        switch (config.type) {
          case 'stdio': {
            if (!('command' in config) || !config.command) {
              throw new Error('Command is required for stdio MCP server');
            }
            server = new MCPServerStdio({
              name: config.name,
              command: config.command,
              args: config.args || [],
              env: config.env,
            });
            await server.connect();
            break;
          }
          case 'sse': {
            if (!('url' in config) || !config.url) {
              throw new Error('URL is required for SSE MCP server');
            }
            server = new MCPServerSSE({
              name: config.name,
              url: config.url,
              requestInit: config.headers ? { headers: config.headers } : undefined,
            });
            await server.connect();
            break;
          }
          case 'streamable_http': {
            if (!('url' in config) || !config.url) {
              throw new Error('URL is required for Streamable HTTP MCP server');
            }
            server = new MCPServerStreamableHttp({
              name: config.name,
              url: config.url,
              requestInit: config.headers ? { headers: config.headers } : undefined,
            });
            await server.connect();
            break;
          }
          default:
            // eslint-disable-next-line no-console
            console.warn(`Unknown MCP server type: ${config.type}`);
        }
      } catch (err) {
        // A failed server is skipped rather than aborting the whole agent run
        // eslint-disable-next-line no-console
        console.error(`Failed to initialize MCP server ${config.name}:`, err);
        server = undefined;
      }
      if (server) servers.push(server);
    }
    return servers;
  }

  async closeMCPServers(servers) {
    // Try whichever shutdown method the server implementation exposes
    await Promise.allSettled(servers.map(async (srv) => {
      try {
        if (typeof srv.close === 'function') {
          await srv.close();
        } else if (typeof srv.cleanup === 'function') {
          await srv.cleanup();
        } else if (typeof srv.disconnect === 'function') {
          await srv.disconnect();
        }
      } catch (e) { void e; }
    }));
  }

  // --- I/O adapters -----------------------------------------------------------

  toAgentInputs(messages) {
    const items = [];
    for (const m of messages) {
      const text = this.extractTextFromContent(m.content);
      if (!text) continue;
      if (m.role === 'system') {
        // System messages are already reflected in the instructions, so skip them here
        continue;
      } else if (m.role === 'user') {
        items.push(user(text));
      } else if (m.role === 'assistant') {
        items.push(assistant(text));
      } else {
        // Fall back to treating tool/function/developer messages as user input
        items.push(user(text));
      }
    }
    return items;
  }

  collectSystemText(messages) {
    return messages
      .filter((m) => m.role === 'system')
      .map((m) => this.extractTextFromContent(m.content))
      .filter(Boolean)
      .join('\n\n');
  }

  adaptFunctionTools(tools) {
    if (!tools?.length) return undefined;
    return tools.map((t, idx) => {
      const name = (t.function?.name && String(t.function.name)) ||
        (t.handler && t.handler.name) ||
        `tool_${idx + 1}`;
      // Normalize tool parameters per Agents API schema requirements
      const parameters = normalizeToolParametersForAgents(t.function?.parameters);
      return agentsTool({
        name,
        description: t.function?.description ?? '',
        parameters,
        async execute(args) {
          const res = await t.handler(args);
          if (res == null) return '';
          return typeof res === 'string' ? res : JSON.stringify(res);
        },
      });
    });
  }

  extractTextFromContent(content) {
    if (!content) return '';
    if (typeof content === 'string') return content;
    const chunks = [];
    for (const c of content) {
      if (c.type === 'text' && typeof c.text === 'string') {
        chunks.push(c.text);
      }
    }
    return chunks.join('\n');
  }

  // --- Unified response conversion -------------------------------------------

  convertAgentResultToUnified(result) {
    const { text, messageContent, model, usage } = this.extractUnifiedPayload(result);
    const unifiedMessage = {
      id: this.generateMessageId(),
      role: 'assistant',
      content: messageContent,
      createdAt: new Date(),
    };
    return {
      id: this.generateMessageId(),
      model,
      provider: 'openai',
      message: unifiedMessage,
      text,
      usage,
      finish_reason: 'stop',
      createdAt: new Date(),
      rawResponse: result,
    };
  }

  /**
   * Extracts top-level `text`, `message.content` (formatted as BaseContent),
   * `model`, and `usage` from either RunResult or StreamedRunResult.
   * - Uses `state.generatedItems` for content extraction
   * - Uses `state.context.usage` for usage stats
   */
  extractUnifiedPayload(result) {
    let usage;
    let messageContent = [];
    let text = '';
    const state = result?.state?.toJSON
      ? result.state.toJSON()
      : JSON.parse(JSON.stringify(result?.state ?? {}));
    const generatedItems = state?.generatedItems;
    const model = state?.lastModelResponse?.providerData?.model || '';
    // Primary path: extract content from generatedItems
    if (Array.isArray(generatedItems)) {
      for (const item of generatedItems) {
        if (item.rawItem) {
          const rawItem = item.rawItem;
          // Process the content array if available (message_output_item)
          if (Array.isArray(rawItem.content)) {
            for (const contentItem of rawItem.content) {
              // Handle text content types (output_text, text, etc.)
              if (contentItem.type && typeof contentItem.text === 'string') {
                messageContent.push({ type: 'text', text: contentItem.text, role: 'assistant' });
                // Only set top-level text for output_text (the final message)
                if (contentItem.type === 'output_text') {
                  text = contentItem.text;
                }
              }
            }
          }
          // Process the output if available (tool calls, function results, etc.)
          if (rawItem.output && rawItem.output.type === 'text' && typeof rawItem.output.text === 'string') {
            messageContent.push({ type: 'text', text: rawItem.output.text, role: 'tool' });
          }
        }
      }
    }
    // Get usage from state.context.usage
    if (state?.context?.usage) {
      const contextUsage = state.context.usage;
      usage = {
        inputTokens: contextUsage.inputTokens ?? 0,
        outputTokens: contextUsage.outputTokens ?? 0,
        totalTokens: contextUsage.totalTokens ?? 0,
      };
    }
    // Secondary path: result.output (fallback for compatibility)
    if (messageContent.length === 0 && Array.isArray(result?.output)) {
      for (const outputItem of result.output) {
        if (Array.isArray(outputItem?.content)) {
          for (const contentItem of outputItem.content) {
            if (typeof contentItem?.text === 'string') {
              messageContent.push({ type: 'text', text: contentItem.text, role: 'assistant' });
              text += contentItem.text;
            }
          }
        }
      }
    }
    // Final fallback: parse from lastProcessedResponse.newItems if still no content
    if (messageContent.length === 0) {
      const items = state?.lastProcessedResponse?.newItems;
      if (Array.isArray(items)) {
        for (const item of items) {
          if (item.type === 'message_output_item' && Array.isArray(item.rawItem?.content)) {
            for (const contentItem of item.rawItem.content) {
              if (typeof contentItem?.text === 'string') {
                messageContent.push({ type: 'text', text: contentItem.text, role: 'assistant' });
                text += contentItem.text;
              }
            }
          }
        }
      }
    }
    // Ensure message.content is not empty when we have text
    if (messageContent.length === 0 && text) {
      messageContent = [{ type: 'text', text, role: 'assistant' }];
    }
    return { text, messageContent, model, usage };
  }

  // --- Error handling ---------------------------------------------------------

  handleError(error) {
    if (error instanceof OpenAI.APIError) {
      const e = new Error(error.message);
      e.statusCode = error.status;
      e.code = error.code || 'openai_error';
      e.type = this.mapErrorType(error.status);
      e.provider = 'openai';
      e.details = error;
      return e;
    }
    if (error instanceof Error) return error;
    return new Error('Unknown error occurred');
  }

  mapErrorType(status) {
    if (!status) return 'api_error';
    if (status === 429) return 'rate_limit';
    if (status === 401) return 'authentication';
    if (status >= 400 && status < 500) return 'invalid_request';
    if (status >= 500) return 'server_error';
    return 'api_error';
  }
}
//# sourceMappingURL=agent-provider.js.map
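
For reference, a sketch of the mcpServers option the constructor accepts, with field names derived from initMCPServers() above: 'command' is required for type 'stdio', 'url' for 'sse' and 'streamable_http'. The server package name here is purely illustrative.

const mcpProvider = new OpenAIAgentProvider({
  apiKey: process.env.OPENAI_API_KEY,
  model: 'gpt-4o-mini',
  mcpServers: [
    // stdio server: spawned as a child process
    { type: 'stdio', name: 'fs', command: 'npx', args: ['-y', 'some-mcp-server', '.'] },
    // remote server over SSE; headers become requestInit.headers
    { type: 'sse', name: 'remote', url: 'https://example.com/mcp', headers: { Authorization: 'Bearer <token>' } },
  ],
});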