
@juspay/neurolink

Universal AI Development Platform with working MCP integration, multi-provider support, and professional CLI. Built-in tools operational, 58+ external MCP servers discoverable. Connect to filesystem, GitHub, database operations, and more. Build, test, and …

133 lines (132 loc) · 5.79 kB
import { createOpenAI } from "@ai-sdk/openai";
import { streamText } from "ai";
import { AIProviderName } from "../core/types.js";
import { BaseProvider } from "../core/baseProvider.js";
import { logger } from "../utils/logger.js";
import { createTimeoutController, TimeoutError } from "../utils/timeout.js";
import { AuthenticationError, InvalidModelError, NetworkError, ProviderError, RateLimitError } from "../types/errors.js";
import { DEFAULT_MAX_TOKENS, DEFAULT_MAX_STEPS } from "../core/constants.js";
import { validateApiKey, createOpenAIConfig, getProviderModel } from "../utils/providerConfig.js";
import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
import { buildMessagesArray } from "../utils/messageBuilder.js";
import { createProxyFetch } from "../proxy/proxyFetch.js";
// Configuration helpers - now using consolidated utility
const getOpenAIApiKey = () => {
    return validateApiKey(createOpenAIConfig());
};
const getOpenAIModel = () => {
    return getProviderModel("OPENAI_MODEL", "gpt-4o");
};
/**
 * OpenAI Provider v2 - BaseProvider Implementation
 * Migrated to use factory pattern with exact Google AI provider pattern
 */
export class OpenAIProvider extends BaseProvider {
    model;
    constructor(modelName, neurolink) {
        super(modelName || getOpenAIModel(), AIProviderName.OPENAI, neurolink);
        // Initialize OpenAI provider with proxy support
        const openai = createOpenAI({
            apiKey: getOpenAIApiKey(),
            fetch: createProxyFetch(),
        });
        // Initialize model
        this.model = openai(this.modelName);
        logger.debug("OpenAIProviderV2 initialized", {
            model: this.modelName,
            provider: this.providerName,
        });
    }
    // ===================
    // ABSTRACT METHOD IMPLEMENTATIONS
    // ===================
    getProviderName() {
        return AIProviderName.OPENAI;
    }
    getDefaultModel() {
        return getOpenAIModel();
    }
    /**
     * Returns the Vercel AI SDK model instance for OpenAI
     */
    getAISDKModel() {
        return this.model;
    }
    handleProviderError(error) {
        if (error instanceof TimeoutError) {
            throw new NetworkError(error.message, this.providerName);
        }
        const errorObj = error;
        const message = errorObj?.message && typeof errorObj.message === "string"
            ? errorObj.message
            : "Unknown error";
        const errorType = errorObj?.type && typeof errorObj.type === "string"
            ? errorObj.type
            : undefined;
        if (message.includes("API_KEY_INVALID") ||
            message.includes("Invalid API key") ||
            errorType === "invalid_api_key") {
            throw new AuthenticationError("Invalid OpenAI API key. Please check your OPENAI_API_KEY environment variable.", this.providerName);
        }
        if (message.includes("rate limit") || errorType === "rate_limit_error") {
            throw new RateLimitError("OpenAI rate limit exceeded. Please try again later.", this.providerName);
        }
        if (message.includes("model_not_found")) {
            throw new InvalidModelError(`Model not found: ${this.modelName}`, this.providerName);
        }
        // Generic provider error
        throw new ProviderError(`OpenAI error: ${message}`, this.providerName);
    }
    /**
     * executeGenerate method removed - generation is now handled by BaseProvider.
     * For details on the changes and migration steps, refer to the BaseProvider documentation
     * and the migration guide in the project repository.
     */
    async executeStream(options, analysisSchema) {
        this.validateStreamOptions(options);
        const startTime = Date.now();
        const timeout = this.getTimeout(options);
        const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
        try {
            // Get tools consistently with generate method
            const shouldUseTools = !options.disableTools && this.supportsTools();
            const tools = shouldUseTools ? await this.getAllTools() : {};
            // Build message array from options
            const messages = buildMessagesArray(options);
            const result = await streamText({
                model: this.model,
                messages: messages,
                temperature: options.temperature,
                maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
                tools,
                maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
                toolChoice: shouldUseTools ? "auto" : "none",
                abortSignal: timeoutController?.controller.signal,
            });
            timeoutController?.cleanup();
            // Transform stream to match StreamResult interface using BaseProvider method
            const transformedStream = this.createTextStream(result);
            // Create analytics promise that resolves after stream completion
            const analyticsPromise = streamAnalyticsCollector.createAnalytics(this.providerName, this.modelName, result, Date.now() - startTime, {
                requestId: `openai-stream-${Date.now()}`,
                streamingMode: true,
            });
            return {
                stream: transformedStream,
                provider: this.providerName,
                model: this.modelName,
                analytics: analyticsPromise,
                metadata: {
                    startTime,
                    streamId: `openai-${Date.now()}`,
                },
            };
        }
        catch (error) {
            timeoutController?.cleanup();
            throw this.handleProviderError(error);
        }
    }
}
// Export for factory registration
export default OpenAIProvider;
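
For orientation, a minimal usage sketch follows. It is not taken from the package: the import path is hypothetical, the options shape consumed by buildMessagesArray and the chunk shape emitted by createTextStream are assumptions inferred from this file, and real callers typically obtain providers through the factory rather than constructing this class directly.

// Hypothetical usage sketch — names and shapes marked below are assumptions.
import OpenAIProvider from "./openai.js"; // path assumed; actual file location may differ

// Requires OPENAI_API_KEY in the environment; OPENAI_MODEL is optional and
// falls back to "gpt-4o" (per getOpenAIModel above).
const provider = new OpenAIProvider(); // or new OpenAIProvider("gpt-4o-mini")

const result = await provider.executeStream({
    input: { text: "Write a haiku about streaming APIs." }, // options shape assumed
    temperature: 0.7,
    maxTokens: 256,
    disableTools: true, // skip MCP tool lookup for a plain completion
});

for await (const chunk of result.stream) {
    process.stdout.write(chunk.content ?? ""); // chunk shape assumed
}

const analytics = await result.analytics; // resolves once the stream has finished

Note the design visible in executeStream: analytics is returned as a promise alongside the live stream, so a caller can start consuming tokens immediately and await usage data only after the stream completes.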