@chainlink/mcp-server
Prototype MCP Server for CLL
"use strict";
/**
* @fileoverview LLM Service Factory for dynamic service creation
*
* Factory class that creates LLM service instances based on configuration
* and environment variables. Supports multiple providers with automatic
* API key detection, model configuration, and service availability checking.
*/
Object.defineProperty(exports, "__esModule", { value: true });
exports.AIServiceFactory = void 0;
const anthropic_service_1 = require("./anthropic-service");
const openai_service_1 = require("./openai-service");
const gemini_service_1 = require("./gemini-service");
const ollama_service_1 = require("./ollama-service");
const logger_1 = require("../utils/logger");
const config_1 = require("../config");
/**
* Factory class for creating LLM service instances
*
* Provides centralized service creation with automatic configuration detection
* from environment variables, API key validation, and support for multiple
* providers including cloud-based and local services.
*
* Supported services:
* - Anthropic Claude (cloud)
* - OpenAI GPT (cloud)
* - Google Gemini (cloud)
* - Ollama (local)
*
* @class AIServiceFactory
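*
* @example
* // A minimal usage sketch (not taken from this package's docs); the require paths
* // are assumed from this file's own imports and its source-map filename:
* const { AIServiceFactory } = require("./service-factory");
* const { LlmService } = require("../config");
* (async () => {
*   // Explicitly request Anthropic; createService throws if ANTHROPIC_API_KEY is not set.
*   const service = await AIServiceFactory.createService({ service: LlmService.Anthropic });
* })();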
*/
class AIServiceFactory {
/**
* Get API key for a specific service from environment variables
*
* @private
* @static
* @param service - The service to get API key for
* @returns API key if available, undefined otherwise
*/
static getApiKey(service) {
switch (service) {
case config_1.LlmService.OpenAI:
return config_1.config.apiKeys.openai;
case config_1.LlmService.Anthropic:
return config_1.config.apiKeys.anthropic;
case config_1.LlmService.Gemini:
return config_1.config.apiKeys.gemini;
case config_1.LlmService.Ollama:
return "ollama"; // Ollama doesn't need an API key, but we provide a dummy value
default:
return undefined;
}
}
/**
* Get default model for a specific service
*
* @private
* @static
* @param service - The service to get default model for
* @returns Default model identifier for the service
* @throws {Error} When service is not supported
*/
static getDefaultModel(service) {
switch (service) {
case config_1.LlmService.OpenAI:
return config_1.config.llm.models[config_1.LlmService.OpenAI];
case config_1.LlmService.Anthropic:
return config_1.config.llm.models[config_1.LlmService.Anthropic];
case config_1.LlmService.Gemini:
return config_1.config.llm.models[config_1.LlmService.Gemini];
case config_1.LlmService.Ollama:
return config_1.config.llm.models[config_1.LlmService.Ollama];
default:
throw new Error(`Unsupported service: ${service}`);
}
}
/**
* Create an LLM service instance with automatic configuration
*
* Detects service configuration from environment variables or provided
* config, validates API keys, and creates the appropriate service instance.
* Falls back to the default service only when no service is explicitly requested.
*
* Configuration is managed through the centralized config system which reads:
* - MCP_LLM_SERVICE: Service to use (defaults to openai only when unset)
* - MCP_LLM_MODEL: Model to use (falls back to service defaults)
* - MCP_MAX_TOKENS: Maximum tokens (default: 2000)
* - *_API_KEY: API keys for each service (ANTHROPIC_API_KEY, OPENAI_API_KEY, GEMINI_API_KEY)
* - OLLAMA_URL: Ollama server URL (default: localhost:11434)
*
* @static
* @param serviceConfig - Optional partial configuration to override defaults
* @returns Promise resolving to a configured LLM service instance
* @throws {Error} When explicitly requested service is not properly configured
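*
* @example
* // Hedged sketch of the two configuration paths; env var names come from the list
* // above, and the concrete values and entrypoint are placeholders:
* //   MCP_LLM_SERVICE=gemini GEMINI_API_KEY=<key> node <entrypoint>
* // Per-call overrides (fields assumed from how serviceConfig is read below),
* // called from inside an async function:
* const service = await AIServiceFactory.createService({
*   model: "some-model-id", // placeholder; omit to fall back to config.llm.models
*   maxTokens: 1024,
* });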
*/
static async createService(serviceConfig) {
// Get explicitly requested service
const requestedService = serviceConfig?.service || config_1.config.env.llmService;
// Only use default if NO service is explicitly requested
const service = (requestedService ||
config_1.config.llm.defaultService);
const isExplicitlyRequested = !!requestedService;
// Log service selection for debugging
if (isExplicitlyRequested) {
logger_1.Logger.log("info", `LLM service explicitly configured: ${service}`);
}
else {
logger_1.Logger.log("info", `LLM service defaulted to: ${service} (no MCP_LLM_SERVICE set)`);
}
// Get model from config or environment variable, or use default
const model = serviceConfig?.model ||
config_1.config.env.llmModel ||
this.getDefaultModel(service);
// Strict validation: if service is explicitly requested, it must be properly configured
if (isExplicitlyRequested) {
// For cloud services, check API key availability
if (service !== config_1.LlmService.Ollama) {
const apiKey = this.getApiKey(service);
if (!apiKey) {
throw new Error(`❌ LLM service '${service}' was explicitly requested but is not properly configured.\n` +
` Missing API key. Please set ${service.toUpperCase()}_API_KEY environment variable.\n` +
` Or remove MCP_LLM_SERVICE to use the default service.`);
}
}
// For Ollama, we'll test connectivity during service creation below
}
else {
// For default service, ensure it has proper configuration
const apiKey = this.getApiKey(service);
if (!apiKey && service !== config_1.LlmService.Ollama) {
throw new Error(`API key not found for default service ${service}. Please set ${service.toUpperCase()}_API_KEY environment variable.`);
}
}
const maxTokens = serviceConfig?.maxTokens || config_1.config.llm.maxTokens;
// Get API key after validation
const apiKey = this.getApiKey(service);
logger_1.Logger.log("info", `Creating ${service} service with model: ${model}`);
switch (service) {
case config_1.LlmService.OpenAI:
return new openai_service_1.OpenAIService({
apiKey: apiKey,
model,
maxTokens,
});
case config_1.LlmService.Anthropic:
return new anthropic_service_1.AnthropicService({
apiKey: apiKey,
model,
maxTokens,
});
case config_1.LlmService.Gemini:
return new gemini_service_1.GeminiService({
apiKey: apiKey,
model,
maxTokens,
});
case config_1.LlmService.Ollama:
const ollamaConfig = {
apiKey: "ollama", // Dummy value for interface compatibility
model,
maxTokens,
baseURL: serviceConfig?.baseURL || config_1.config.embeddings.ollama.url,
};
const ollamaService = new ollama_service_1.OllamaService(ollamaConfig);
// Test Ollama connectivity immediately during service creation
logger_1.Logger.log("info", `Testing Ollama connectivity at ${ollamaConfig.baseURL}...`);
try {
await ollamaService.testConnection();
logger_1.Logger.log("info", "✅ Ollama connectivity test successful");
}
catch (error) {
const baseErrorMsg = `❌ Ollama connectivity test failed: ${error}`;
const helpMsg = isExplicitlyRequested
? `\n Ollama service was explicitly requested but is not running.\n Please start Ollama with: ollama serve\n Or remove MCP_LLM_SERVICE=ollama to use a different service.`
: `\n Please ensure Ollama is running at ${ollamaConfig.baseURL}`;
const fullErrorMsg = baseErrorMsg + helpMsg;
logger_1.Logger.log("error", fullErrorMsg);
throw new Error(fullErrorMsg);
}
return ollamaService;
default:
throw new Error(`❌ Unsupported LLM service: '${service}'.\n` +
` Supported services: anthropic, openai, gemini, ollama\n` +
` Please check your MCP_LLM_SERVICE environment variable.`);
}
}
/**
* Check if a specific service is properly configured
*
* Validates that the required API key is available for cloud services
* or that the service is accessible for local services like Ollama.
*
* @static
* @param service - Service to check configuration for
* @returns True if service is configured and ready to use
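*
* @example
* // Illustrative sketch: probe which services are usable before creating one
* // (the LlmService members mirror the cases handled in createService above):
* const candidates = [LlmService.Anthropic, LlmService.OpenAI, LlmService.Gemini, LlmService.Ollama];
* const usable = candidates.filter((s) => AIServiceFactory.isServiceConfigured(s));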
*/
static isServiceConfigured(service) {
if (service === config_1.LlmService.Ollama) {
// For Ollama, we'll do runtime checks in the service itself
return true;
}
return !!this.getApiKey(service);
}
}
exports.AIServiceFactory = AIServiceFactory;
//# sourceMappingURL=service-factory.js.map