mycoder-agent

Agent module for mycoder - an AI-powered software development assistant

openai.js (168 lines, 5.96 kB)
/**
 * OpenAI provider implementation
 */
import OpenAI from 'openai';
import { TokenUsage } from '../../tokens.js';

// Define model context window sizes for OpenAI models
const OPENAI_MODEL_LIMITS = {
  'gpt-4o': 128000,
  'gpt-4-turbo': 128000,
  'gpt-4-0125-preview': 128000,
  'gpt-4-1106-preview': 128000,
  'gpt-4': 8192,
  'gpt-4-32k': 32768,
  'gpt-3.5-turbo': 16385,
  'gpt-3.5-turbo-16k': 16385,
  // Add other models as needed
};

/**
 * OpenAI provider implementation
 */
export class OpenAIProvider {
  name = 'openai';
  provider = 'openai.chat';
  model;
  client;
  apiKey;
  baseUrl;
  organization;

  constructor(model, options = {}) {
    this.model = model;
    this.apiKey = options.apiKey ?? '';
    this.baseUrl = options.baseUrl;
    // Initialize OpenAI client
    this.client = new OpenAI({
      apiKey: this.apiKey,
      ...(this.baseUrl && { baseURL: this.baseUrl }),
    });
  }

  /**
   * Generate text using OpenAI API
   */
  async generateText(options) {
    const {
      messages,
      functions,
      temperature = 0.7,
      maxTokens: requestMaxTokens,
      stopSequences,
      topP,
      presencePenalty,
      frequencyPenalty,
      responseFormat,
    } = options;

    // Format messages for OpenAI
    const formattedMessages = this.formatMessages(messages);

    // Format functions for OpenAI
    const tools = functions ? this.formatFunctions(functions) : undefined;

    try {
      const requestOptions = {
        model: this.model,
        messages: formattedMessages,
        temperature,
        max_tokens: requestMaxTokens,
        stop: stopSequences,
        top_p: topP,
        presence_penalty: presencePenalty,
        frequency_penalty: frequencyPenalty,
        tools: tools,
        response_format:
          responseFormat === 'json_object' ? { type: 'json_object' } : undefined,
      };

      const response = await this.client.chat.completions.create(requestOptions);

      // Extract content and tool calls
      const message = response.choices[0]?.message;
      const content = message?.content || '';

      // Handle tool calls if present
      const toolCalls = [];
      if (message?.tool_calls) {
        for (const tool of message.tool_calls) {
          if (tool.type === 'function') {
            toolCalls.push({
              id: tool.id,
              name: tool.function.name,
              content: tool.function.arguments,
            });
          }
        }
      }

      // Create token usage
      const tokenUsage = new TokenUsage();
      tokenUsage.input = response.usage?.prompt_tokens || 0;
      tokenUsage.output = response.usage?.completion_tokens || 0;

      // Calculate total tokens and get max tokens for the model
      const totalTokens = tokenUsage.input + tokenUsage.output;
      const modelMaxTokens = OPENAI_MODEL_LIMITS[this.model] || 8192; // Default fallback

      return {
        text: content,
        toolCalls,
        tokenUsage,
        totalTokens,
        maxTokens: modelMaxTokens,
      };
    } catch (error) {
      throw new Error(`Error calling OpenAI API: ${error.message}`);
    }
  }

  /**
   * Format messages for OpenAI API
   */
  formatMessages(messages) {
    return messages.map((msg) => {
      // Use switch for better type narrowing
      switch (msg.role) {
        case 'user':
          return {
            role: 'user',
            content: msg.content,
          };
        case 'system':
          return {
            role: 'system',
            content: msg.content,
          };
        case 'assistant':
          return {
            role: 'assistant',
            content: msg.content,
          };
        case 'tool_use':
          // OpenAI doesn't have a direct equivalent to tool_use,
          // so we'll include it as a function call in an assistant message
          return {
            role: 'assistant',
            content: '',
            tool_calls: [
              {
                id: msg.id,
                type: 'function',
                function: {
                  name: msg.name,
                  arguments: msg.content,
                },
              },
            ],
          };
        case 'tool_result':
          // Tool results in OpenAI are represented as tool messages
          return {
            role: 'tool',
            content: msg.content,
            tool_call_id: msg.tool_use_id,
          };
        default:
          // For any other role, default to user message
          return {
            role: 'user',
            content: 'Unknown message type',
          };
      }
    });
  }

  /**
   * Format functions for OpenAI API
   */
  formatFunctions(functions) {
    return functions.map((fn) => ({
      type: 'function',
      function: {
        name: fn.name,
        description: fn.description,
        parameters: fn.parameters,
      },
    }));
  }
}
//# sourceMappingURL=openai.js.map
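
For orientation, here is a minimal usage sketch of the class above. It is not part of the package source: the import specifier and the environment-variable credential are assumptions (this page does not show where openai.js lives inside the package or how the package re-exports it), while the constructor arguments, message roles, option names, and result fields all come from generateText and formatMessages as defined in this file.

// Hypothetical usage sketch. The import specifier and OPENAI_API_KEY
// handling below are assumptions, not documented package API.
import { OpenAIProvider } from 'mycoder-agent';

const provider = new OpenAIProvider('gpt-4o', {
  apiKey: process.env.OPENAI_API_KEY ?? '', // assumed credential source
});

const result = await provider.generateText({
  messages: [
    { role: 'system', content: 'You are a concise assistant.' },
    { role: 'user', content: 'What is a context window?' },
  ],
  temperature: 0.2,
});

console.log(result.text);        // assistant reply text
console.log(result.totalTokens); // input + output tokens from the API usage field
console.log(result.maxTokens);   // context limit looked up in OPENAI_MODEL_LIMITS

If options.functions is supplied, each entry is forwarded to the API as a tool via formatFunctions, and any tool invocations come back in result.toolCalls, with the arguments left as the raw JSON string the model produced.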