@chainlink/mcp-server
Prototype MCP Server for CLL
"use strict";
/**
 * @fileoverview OpenAI service implementation for AI completions
 *
 * Provides integration with OpenAI's GPT models through their official API.
 * Handles authentication, request formatting, and response parsing for
 * chat completions using the OpenAI SDK.
 */
var __importDefault = (this && this.__importDefault) || function (mod) {
    return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.OpenAIService = void 0;
const openai_1 = __importDefault(require("openai"));
const ai_service_1 = require("./ai-service");
/**
 * OpenAI service implementation for chat completions
 *
 * Supports OpenAI chat models such as GPT-3.5-turbo, GPT-4, and newer variants.
 * Rate limiting, retries, and API error responses are handled by the underlying
 * OpenAI SDK.
 *
 * @class OpenAIService
 * @extends AIService
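 *
 * @example
 * // Hypothetical usage sketch; the config fields shown here (apiKey, model) are
 * // assumed from the constructor and generateResponse implementations below.
 * const service = new OpenAIService({ apiKey: process.env.OPENAI_API_KEY, model: "gpt-4" });
 * const reply = await service.generateResponse([{ role: "user", content: "Hello" }]);
 * console.log(reply.content, reply.usage);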
 */
class OpenAIService extends ai_service_1.AIService {
    /** OpenAI client instance for API communication */
    client;
    /**
     * Initialize the OpenAI service with API configuration
     *
     * @param config - Service configuration including API key and model selection
     */
    constructor(config) {
        super(config);
        this.client = new openai_1.default({
            apiKey: config.apiKey,
        });
    }
    /**
     * Generate a chat completion response using OpenAI's API
     *
     * Converts the internal message format to OpenAI's expected format, sends the
     * request, and transforms the response back into the standard response shape.
     *
     * @param messages - Conversation history to send to OpenAI
     * @returns Promise resolving to the formatted AI response with usage statistics
     * @throws {Error} When the API key is invalid, the quota is exceeded, or the API returns another error
     */
    async generateResponse(messages) {
        const openaiMessages = messages.map((msg) => ({
            role: msg.role,
            content: msg.content,
        }));
        const response = await this.client.chat.completions.create({
            model: this.config.model,
            max_tokens: this.config.maxTokens || 2000,
            messages: openaiMessages,
        });
        return {
            content: response.choices[0]?.message?.content || "",
            usage: response.usage
                ? {
                    input_tokens: response.usage.prompt_tokens,
                    output_tokens: response.usage.completion_tokens,
                    total_tokens: response.usage.total_tokens,
                }
                : undefined,
            id: response.id,
            requestId: undefined,
        };
    }
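    // Illustrative shape of the object resolved by generateResponse above; field
    // names follow the mapping in that method, and the values are placeholders:
    //   {
    //     content: "<assistant text>",
    //     usage: { input_tokens: number, output_tokens: number, total_tokens: number },
    //     id: "<openai response id>",
    //     requestId: undefined,
    //   }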
    /**
     * Check if the service has a valid API key configuration
     *
     * @returns True if API key is provided, false otherwise
     */
    isConfigured() {
        return !!this.config.apiKey;
    }
    /**
     * Get the service name for identification
     *
     * @returns "OpenAI" as the service identifier
     */
    getServiceName() {
        return "OpenAI";
    }
}
exports.OpenAIService = OpenAIService;
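// Hypothetical caller-side guard (illustrative only, not part of this module): a host
// application could use isConfigured() and getServiceName() to skip backends that are
// missing credentials before routing any requests to them.
//
//   const service = new OpenAIService(config);
//   if (!service.isConfigured()) {
//     throw new Error(`${service.getServiceName()} is not configured: missing API key`);
//   }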
//# sourceMappingURL=openai-service.js.map