// mcp-adr-analysis-server
// Version: (unspecified)
// MCP server for analyzing Architectural Decision Records and project architecture
// 292 lines • 10.3 kB
// JavaScript
/**
* AI Executor Service for OpenRouter.ai Integration
*
* This service handles the execution of prompts using OpenRouter.ai,
* transforming the MCP server from returning prompts to returning actual results.
*/
import OpenAI from 'openai';
import { loadAIConfig, validateAIConfig, isAIExecutionEnabled } from '../config/ai-config.js';
/**
 * AI Executor Service Class
 *
 * Wraps the OpenAI SDK pointed at OpenRouter.ai. Executes plain-text and
 * structured (JSON) prompts with retry + exponential backoff and an in-memory
 * TTL result cache. When AI execution is disabled, or client initialization
 * fails, the executor stays in prompt-only mode and isAvailable() returns
 * false.
 */
export class AIExecutor {
    // OpenAI SDK client configured for OpenRouter; null while disabled or uninitialized.
    client = null;
    // Active configuration (shape defined in ../config/ai-config.js).
    config;
    // Result cache: cacheKey -> { result, expiry }, expiry being an epoch-ms deadline.
    cache = new Map();
    /**
     * @param {object} [config] - Pre-built AI configuration. When omitted, the
     *   configuration is loaded from the environment via loadAIConfig().
     */
    constructor(config) {
        this.config = config || loadAIConfig();
        this.initializeClient();
    }
    /**
     * Initialize the OpenAI client for OpenRouter.
     *
     * Leaves this.client null (prompt-only mode) when execution is disabled or
     * the configuration fails validation; initialization errors are logged
     * rather than thrown so construction never fails.
     */
    initializeClient() {
        if (!isAIExecutionEnabled(this.config)) {
            console.log('AI execution disabled - running in prompt-only mode');
            return;
        }
        try {
            validateAIConfig(this.config);
            this.client = new OpenAI({
                baseURL: this.config.baseURL,
                apiKey: this.config.apiKey,
                timeout: this.config.timeout,
                maxRetries: this.config.maxRetries,
                defaultHeaders: {
                    // OpenRouter attribution headers (optional; empty when unset).
                    'HTTP-Referer': this.config.siteUrl || '',
                    'X-Title': this.config.siteName || '',
                },
            });
            console.log(`AI Executor initialized with model: ${this.config.defaultModel}`);
        }
        catch (error) {
            console.error('Failed to initialize AI Executor:', error);
            this.client = null;
        }
    }
    /**
     * Check if AI execution is available.
     * Reloads configuration first so environment-variable changes are honored.
     * @returns {boolean}
     */
    isAvailable() {
        this.reloadConfigIfNeeded();
        return this.client !== null && isAIExecutionEnabled(this.config);
    }
    /**
     * Reload configuration and reinitialize the client if key settings
     * (API key, execution mode, or default model) changed since the last load.
     */
    reloadConfigIfNeeded() {
        const currentConfig = loadAIConfig();
        const configChanged = this.config.apiKey !== currentConfig.apiKey ||
            this.config.executionMode !== currentConfig.executionMode ||
            this.config.defaultModel !== currentConfig.defaultModel;
        if (configChanged) {
            console.log('AI configuration changed, reinitializing...');
            this.config = currentConfig;
            this.initializeClient();
        }
    }
    /**
     * Execute a prompt and return the AI response.
     *
     * @param {string} prompt - User prompt text.
     * @param {object} [options] - Per-call overrides: model, systemPrompt,
     *   temperature, maxTokens.
     * @returns {Promise<object>} result with content, model, optional usage,
     *   and metadata { executionTime, cached, retryCount, timestamp }.
     * @throws error with code AI_UNAVAILABLE when execution is disabled, or
     *   AI_EXECUTION_FAILED after exhausting retries.
     */
    async executePrompt(prompt, options = {}) {
        // isAvailable() reloads the configuration itself, so no separate
        // reloadConfigIfNeeded() call is needed here.
        if (!this.isAvailable()) {
            throw this.createError('AI execution not available - check configuration', 'AI_UNAVAILABLE', false);
        }
        const startTime = Date.now();
        const model = options.model || this.config.defaultModel;
        const cacheKey = this.generateCacheKey(prompt, model, options);
        // Serve from cache when enabled and the entry has not expired.
        if (this.config.cacheEnabled) {
            const cached = this.getCachedResult(cacheKey);
            if (cached) {
                return cached;
            }
        }
        let retryCount = 0;
        const maxRetries = this.config.maxRetries;
        while (retryCount <= maxRetries) {
            try {
                const messages = [];
                if (options.systemPrompt) {
                    messages.push({ role: 'system', content: options.systemPrompt });
                }
                messages.push({ role: 'user', content: prompt });
                const completion = await this.client.chat.completions.create({
                    model,
                    messages,
                    temperature: options.temperature ?? this.config.temperature,
                    max_tokens: options.maxTokens ?? this.config.maxTokens,
                });
                const result = {
                    content: completion.choices[0]?.message?.content || '',
                    model: completion.model,
                    metadata: {
                        executionTime: Date.now() - startTime,
                        cached: false,
                        retryCount,
                        timestamp: new Date().toISOString(),
                    },
                };
                // Token accounting is optional in the API response.
                if (completion.usage) {
                    result.usage = {
                        promptTokens: completion.usage.prompt_tokens,
                        completionTokens: completion.usage.completion_tokens,
                        totalTokens: completion.usage.total_tokens,
                    };
                }
                if (this.config.cacheEnabled) {
                    this.setCachedResult(cacheKey, result);
                }
                return result;
            }
            catch (error) {
                retryCount++;
                if (retryCount > maxRetries) {
                    throw this.createError(`AI execution failed after ${maxRetries} retries: ${error}`, 'AI_EXECUTION_FAILED', false, error);
                }
                // Exponential backoff: 1s, 2s, 4s, ... capped at 10s.
                const delay = Math.min(1000 * Math.pow(2, retryCount - 1), 10000);
                await new Promise(resolve => setTimeout(resolve, delay));
            }
        }
        throw this.createError('Unexpected error in AI execution', 'AI_UNEXPECTED_ERROR', false);
    }
    /**
     * Execute a structured prompt that expects a JSON response.
     *
     * @param {string} prompt - User prompt text.
     * @param {object} [schema] - Optional validator with a parse() method
     *   (e.g. a zod schema); applied to the parsed data when provided.
     * @param {object} [options] - Same overrides as executePrompt().
     * @returns {Promise<{data: any, raw: object}>} parsed data plus the raw result.
     * @throws error with code AI_JSON_PARSE_ERROR when the response is not valid JSON.
     */
    async executeStructuredPrompt(prompt, schema, options = {}) {
        const systemPrompt = options.systemPrompt ||
            'You are a helpful assistant that responds with valid JSON. Always return properly formatted JSON that matches the requested schema. Do not wrap the JSON in markdown code blocks.';
        const result = await this.executePrompt(prompt, {
            ...options,
            systemPrompt,
            temperature: options.temperature ?? 0.1, // Lower temperature for structured output
        });
        try {
            // Extract JSON from response, handling markdown code blocks
            const jsonContent = this.extractJsonFromResponse(result.content);
            const data = JSON.parse(jsonContent);
            // Basic schema validation if provided
            if (schema && typeof schema.parse === 'function') {
                schema.parse(data);
            }
            return { data, raw: result };
        }
        catch (error) {
            throw this.createError(`Failed to parse JSON response: ${error}`, 'AI_JSON_PARSE_ERROR', false, error);
        }
    }
    /**
     * Extract JSON content from an AI response, handling markdown code blocks.
     * Tries, in order: fenced ``` block, inline `code`, first {...}/[...] span;
     * falls back to the trimmed content unchanged.
     * @param {string} content
     * @returns {string}
     */
    extractJsonFromResponse(content) {
        content = content.trim();
        // Fenced code block, optionally tagged "json".
        const codeBlockMatch = content.match(/^```(?:json)?\s*\n?([\s\S]*?)\n?```$/);
        if (codeBlockMatch && codeBlockMatch[1]) {
            return codeBlockMatch[1].trim();
        }
        // Inline single-backtick code span.
        const inlineCodeMatch = content.match(/^`([\s\S]*?)`$/);
        if (inlineCodeMatch && inlineCodeMatch[1]) {
            return inlineCodeMatch[1].trim();
        }
        // Greedy match of an embedded JSON object or array.
        const jsonMatch = content.match(/(\{[\s\S]*\}|\[[\s\S]*\])/);
        if (jsonMatch && jsonMatch[1]) {
            return jsonMatch[1].trim();
        }
        return content;
    }
    /**
     * Generate a cache key for a prompt execution.
     *
     * Uses the full JSON serialization of (prompt, model, options) so distinct
     * executions never share a key. The previous implementation truncated a
     * base64 encoding to 32 characters, preserving only the first 24 bytes of
     * the serialized key — prompts sharing a short prefix collided and could
     * return each other's cached results.
     * @returns {string}
     */
    generateCacheKey(prompt, model, options) {
        return JSON.stringify({ prompt, model, options });
    }
    /**
     * Get a cached result if present and not expired; expired entries are
     * evicted. The returned copy has metadata.cached set to true.
     * @returns {object|null}
     */
    getCachedResult(cacheKey) {
        const cached = this.cache.get(cacheKey);
        if (!cached)
            return null;
        if (Date.now() > cached.expiry) {
            this.cache.delete(cacheKey);
            return null;
        }
        // Shallow-copy so the cached original keeps cached: false.
        const result = { ...cached.result };
        result.metadata = { ...result.metadata, cached: true };
        return result;
    }
    /**
     * Cache a result with a TTL of config.cacheTTL seconds.
     * Triggers opportunistic cleanup once the cache grows past 100 entries.
     */
    setCachedResult(cacheKey, result) {
        const expiry = Date.now() + (this.config.cacheTTL * 1000);
        this.cache.set(cacheKey, { result, expiry });
        if (this.cache.size > 100) {
            this.cleanupCache();
        }
    }
    /**
     * Remove all expired entries from the cache.
     */
    cleanupCache() {
        const now = Date.now();
        for (const [key, value] of this.cache.entries()) {
            if (now > value.expiry) {
                this.cache.delete(key);
            }
        }
    }
    /**
     * Create a standardized AI execution error.
     * @param {string} message
     * @param {string} code - Machine-readable error code (e.g. AI_UNAVAILABLE).
     * @param {boolean} retryable - Whether callers may retry the operation.
     * @param {Error} [originalError] - Underlying cause, when wrapping.
     * @returns {Error} error augmented with code, retryable, originalError.
     */
    createError(message, code, retryable, originalError) {
        const error = new Error(message);
        error.code = code;
        error.retryable = retryable;
        error.originalError = originalError;
        return error;
    }
    /**
     * Get a shallow copy of the current configuration.
     */
    getConfig() {
        return { ...this.config };
    }
    /**
     * Merge new settings into the configuration and reinitialize the client.
     */
    updateConfig(newConfig) {
        this.config = { ...this.config, ...newConfig };
        this.initializeClient();
    }
    /**
     * Clear the result cache.
     */
    clearCache() {
        this.cache.clear();
    }
    /**
     * Get cache statistics.
     * @returns {{size: number, hitRate: number}} hitRate is a placeholder —
     *   hits/misses are not tracked.
     */
    getCacheStats() {
        return {
            size: this.cache.size,
            hitRate: 0 // Would need to track hits/misses for accurate calculation
        };
    }
}
/**
 * Module-level singleton AI executor instance (created lazily).
 */
let globalExecutor = null;
/**
 * Get or create the global AI executor instance.
 *
 * @returns {AIExecutor} the shared executor, constructed on first use with
 *   configuration loaded from the environment.
 */
export function getAIExecutor() {
    globalExecutor ??= new AIExecutor();
    return globalExecutor;
}
/**
 * Reset the global AI executor (useful for testing).
 *
 * Drops the module-level singleton so the next getAIExecutor() call builds a
 * fresh AIExecutor and re-reads configuration from the environment.
 */
export function resetAIExecutor() {
globalExecutor = null;
}
//# sourceMappingURL=ai-executor.js.map