mcp-prompt-optimizer
Version: 2.2.0
Professional cloud-based MCP server for AI-powered prompt optimization with intelligent context detection, Bayesian optimization, AG-UI real-time optimization, template auto-save, optimization insights, personal model configuration via WebUI, and team collaboration.
1,052 lines (918 loc) • 44.5 kB
JavaScript
/**
* MCP Prompt Optimizer - Professional Cloud-Based MCP Server
* Production-grade with Bayesian optimization, AG-UI real-time features, enhanced network resilience,
* development mode, and complete backend alignment
*
* Version: 2.2.0 - Aligned with FastAPI Backend production-v2.2.0-stable
*/
const { Server } = require('@modelcontextprotocol/sdk/server/index.js');
const { StdioServerTransport } = require('@modelcontextprotocol/sdk/server/stdio.js');
const { CallToolRequestSchema, ListToolsRequestSchema } = require('@modelcontextprotocol/sdk/types.js');
const https = require('https');
const CloudApiKeyManager = require('./lib/api-key-manager');
const packageJson = require('./package.json');
const API_KEYS_PREFIX = '/api/v1/api-keys';

/**
 * Central registry of backend REST paths used by this MCP server.
 * Collecting them here keeps the request handlers free of scattered
 * string literals.
 */
const ENDPOINTS = {
  // AI context detection (POST)
  DETECT_CONTEXT: '/api/v1/templates/detect-context',
  // Prompt optimization (POST)
  OPTIMIZE: '/api/v1/optimize',
  // Template CRUD
  TEMPLATE: {
    CREATE: '/api/v1/templates/',               // POST
    GET: (id) => `/api/v1/templates/${id}`,     // GET
    UPDATE: (id) => `/api/v1/templates/${id}`,  // PATCH
    DELETE: (id) => `/api/v1/templates/${id}`,  // DELETE
  },
  // Template search: the backend has no dedicated `/search` path — the
  // search handler appends query parameters to the collection root, so
  // this constant intentionally equals the template collection path.
  SEARCH_TEMPLATES: '/api/v1/templates/',
  // Quota status (GET) — lives under the plural "api-keys" router
  QUOTA_STATUS: `${API_KEYS_PREFIX}/quota-status`,
  // API key validation (POST) — same plural router
  VALIDATE_KEY: `${API_KEYS_PREFIX}/validate`,
  // Bayesian optimization analytics (GET)
  ANALYTICS_BAYESIAN_INSIGHTS: '/api/v1/analytics/bayesian-insights',
  // AG-UI real-time status (GET)
  AGUI_STATUS: '/api/v1/agui/status',
};
class MCPPromptOptimizer {
constructor() {
this.server = new Server(
{
name: "mcp-prompt-optimizer",
version: packageJson.version,
},
{
capabilities: {
tools: {},
},
}
);
this.backendUrl = process.env.OPTIMIZER_BACKEND_URL || 'https://p01--project-optimizer--fvmrdk8m9k9j.code.run';
this.apiKey = process.env.OPTIMIZER_API_KEY;
this.developmentMode = process.env.NODE_ENV === 'development' || process.env.OPTIMIZER_DEV_MODE === 'true';
this.requestTimeout = parseInt(process.env.OPTIMIZER_REQUEST_TIMEOUT) || 30000;
// NEW: Feature flags aligned with backend
this.bayesianOptimizationEnabled = process.env.ENABLE_BAYESIAN_OPTIMIZATION === 'true';
this.aguiFeatures = process.env.ENABLE_AGUI_FEATURES === 'true';
this.setupMCPHandlers();
}
setupMCPHandlers() {
// Registers the two MCP request handlers: tool discovery (ListTools)
// and tool dispatch (CallTool). The advertised tool list is static
// except for two feature-flagged additions (Bayesian insights, AG-UI).
this.server.setRequestHandler(ListToolsRequestSchema, async () => {
// Tools always available, regardless of feature flags.
const baseTools = [
{
name: "optimize_prompt",
description: "🎯 Professional AI-powered prompt optimization with intelligent context detection, Bayesian optimization, template auto-save, and comprehensive optimization insights",
inputSchema: {
type: "object",
properties: {
prompt: {
type: "string",
description: "The prompt text to optimize"
},
goals: {
type: "array",
items: { type: "string" },
description: "Optimization goals (e.g., 'clarity', 'conciseness', 'creativity', 'technical_accuracy', 'analytical_depth', 'creative_enhancement')",
default: ["clarity"]
},
ai_context: {
type: "string",
enum: [
"human_communication", "llm_interaction", "image_generation", "technical_automation",
"structured_output", "code_generation", "api_automation"
],
description: "The context for the AI's task (auto-detected if not specified with enhanced detection)"
},
enable_bayesian: {
type: "boolean",
description: "Enable Bayesian optimization features for parameter tuning (if available)",
default: true
}
},
required: ["prompt"]
}
},
{
name: "get_quota_status",
description: "📊 Check subscription status, quota usage, and account information with detailed insights and Bayesian optimization metrics",
inputSchema: { type: "object", properties: {}, additionalProperties: false }
},
{
name: "create_template",
description: "➕ Create a new optimization template.",
inputSchema: {
type: "object",
properties: {
title: { type: "string", description: "Title of the template" },
description: { type: "string", description: "Description of the template" },
original_prompt: { type: "string", description: "The original prompt text" },
optimized_prompt: { type: "string", description: "The optimized prompt text" },
optimization_goals: { type: "array", items: { type: "string" }, description: "Goals for this optimization (e.g., 'clarity', 'conciseness', 'creativity', 'technical_accuracy', 'analytical_depth', 'creative_enhancement')" },
confidence_score: { type: "number", description: "Confidence score of the optimization (0.0-1.0)" },
model_used: { type: "string", description: "Model used for optimization" },
optimization_tier: { type: "string", description: "Tier of optimization (e.g., rules, llm, hybrid)" },
ai_context_detected: { type: "string", description: "Detected AI context (e.g., code_generation, image_generation)" },
is_public: { type: "boolean", default: false, description: "Whether the template is public" },
tags: { type: "array", items: { type: "string" }, description: "Tags for the template" }
},
required: ["title", "original_prompt", "optimized_prompt", "confidence_score"]
}
},
{
name: "get_template",
description: "🔍 Retrieve a specific template by its ID.",
inputSchema: {
type: "object",
properties: {
template_id: { type: "string", description: "The ID of the template to retrieve" }
},
required: ["template_id"]
}
},
{
name: "update_template",
description: "✏️ Update an existing optimization template.",
inputSchema: {
type: "object",
properties: {
template_id: { type: "string", description: "The ID of the template to update" },
title: { type: "string", description: "New title for the template" },
description: { type: "string", description: "New description for the template" },
original_prompt: { type: "string", description: "New original prompt text" },
optimized_prompt: { type: "string", description: "New optimized prompt text" },
optimization_goals: { type: "array", items: { type: "string" }, description: "New optimization goals" },
confidence_score: { type: "number", description: "New confidence score (0.0-1.0)" },
model_used: { type: "string", description: "New model used for optimization" },
optimization_tier: { type: "string", description: "New tier of optimization" },
ai_context_detected: { type: "string", description: "New detected AI context" },
is_public: { type: "boolean", description: "Whether the template is public" },
tags: { type: "array", items: { type: "string" }, description: "New tags for the template" }
},
required: ["template_id"]
}
},
{
name: "search_templates",
description: "🔍 Search your saved template library with AI-aware filtering, context-based search, and sophisticated template matching",
inputSchema: {
type: "object",
properties: {
query: {
type: "string",
description: "Search term to filter templates by content or title"
},
ai_context: {
type: "string",
enum: ["human_communication", "llm_interaction", "image_generation", "technical_automation", "structured_output", "code_generation", "api_automation"],
description: "Filter templates by AI context type"
},
sophistication_level: {
type: "string",
enum: ["basic", "intermediate", "advanced", "expert"],
description: "Filter by template sophistication level"
},
complexity_level: {
type: "string",
enum: ["simple", "moderate", "complex", "very_complex"],
description: "Filter by template complexity level"
},
optimization_strategy: {
type: "string",
description: "Filter by optimization strategy used"
},
limit: {
type: "number",
default: 5,
description: "Number of templates to return (1-20)"
},
sort_by: {
type: "string",
enum: ["created_at", "confidence_score", "usage_count", "title"],
default: "confidence_score",
description: "Sort templates by field"
},
sort_order: {
type: "string",
enum: ["asc", "desc"],
default: "desc",
description: "Sort order"
}
}
}
},
{
name: "detect_ai_context",
description: "🧠 Detects the AI context for a given prompt using advanced backend analysis.",
inputSchema: {
type: "object",
properties: {
prompt: {
type: "string",
description: "The prompt text for which to detect the AI context"
}
},
required: ["prompt"]
}
},
];
// Add advanced tools if Bayesian optimization is enabled
if (this.bayesianOptimizationEnabled) {
baseTools.push({
name: "get_optimization_insights",
description: "🧠 Get advanced Bayesian optimization insights, performance analytics, and parameter tuning recommendations",
inputSchema: {
type: "object",
properties: {
analysis_depth: {
type: "string",
enum: ["basic", "detailed", "comprehensive"],
default: "detailed",
description: "Depth of analysis to provide"
},
include_recommendations: {
type: "boolean",
default: true,
description: "Include optimization recommendations"
}
}
}
});
}
// Add AG-UI tools if enabled
if (this.aguiFeatures) {
baseTools.push({
name: "get_real_time_status",
description: "⚡ Get real-time optimization status, AG-UI capabilities, and streaming optimization availability",
inputSchema: { type: "object", properties: {}, additionalProperties: false }
});
}
return { tools: baseTools };
});
// Dispatch incoming tool calls to the matching handler method.
// Note: get_optimization_insights / get_real_time_status are reachable
// here even when not advertised above; their handlers gate on the same
// feature flags.
this.server.setRequestHandler(CallToolRequestSchema, async (request) => {
const { name, arguments: args } = request.params;
try {
switch (name) {
case "optimize_prompt": return await this.handleOptimizePrompt(args);
case "get_quota_status": return await this.handleGetQuotaStatus();
case "search_templates": return await this.handleSearchTemplates(args);
case "detect_ai_context": return await this.handleDetectAIContext(args);
case "create_template": return await this.handleCreateTemplate(args);
case "get_template": return await this.handleGetTemplate(args);
case "update_template": return await this.handleUpdateTemplate(args);
case "get_optimization_insights": return await this.handleGetOptimizationInsights(args);
case "get_real_time_status": return await this.handleGetRealTimeStatus();
default: throw new Error(`Unknown tool: ${name}`);
}
} catch (error) {
// Re-wrap so the MCP client always sees a uniform failure message.
throw new Error(`Tool execution failed: ${error.message}`);
}
});
}
generateMockOptimization(prompt, goals, aiContext, enableBayesian = false) {
const optimized_prompt = `Enhanced for ${aiContext}: ${prompt}`;
const baseResult = {
optimized_prompt,
confidence_score: 0.87,
tier: 'explorer',
mock_mode: true,
template_saved: true,
template_id: 'test-template-123',
templates_found: [{ title: 'Similar Template 1', confidence_score: 0.85, id: 'tmpl-1' }],
optimization_insights: {
improvement_metrics: {
clarity_improvement: 0.25,
specificity_improvement: 0.20,
length_optimization: 0.15,
context_alignment: 0.30
},
user_patterns: {
optimization_confidence: '87.0%',
prompt_complexity: 'intermediate',
ai_context: aiContext
},
recommendations: [
`Context detected as ${aiContext}`,
'Enhanced goal optimization applied',
'Template auto-save threshold met'
]
}
};
// Add Bayesian optimization insights if enabled
if (enableBayesian && this.bayesianOptimizationEnabled) {
baseResult.bayesian_insights = {
parameter_optimization: {
temperature_adjustment: '+0.1',
context_weight: '+0.15',
goal_prioritization: 'clarity > specificity > engagement'
},
performance_prediction: {
expected_improvement: '12-18%',
confidence_interval: '85-95%',
optimization_strategy: 'gradient_boost_context'
},
next_optimization_recommendation: {
suggested_goals: ['analytical_depth', 'creative_enhancement'],
estimated_improvement: '8-12%'
}
};
}
return baseResult;
}
generateMockContextDetection(prompt) {
let primary_context = 'human_communication'; // Default context
const lc = prompt.toLowerCase(); // one‑off lower‑case copy
/* 1️⃣ Code / programming – now includes `def` / `return`. */
if (lc.match(/def\b|return\b|import\b|class\b|for\b|while\b|if\b|else\b|elif\b|function\b|code\b|python|javascript|java|c\+\+/i)) {
primary_context = 'code_generation';
/* 2️⃣ Image / art – unchanged. */
} else if (lc.match(/image|generate|dall-e|midjourney/i)) {
primary_context = 'image_generation';
/* 3️⃣ Automation – unchanged. */
} else if (lc.match(/automate|script|api/i)) {
primary_context = 'technical_automation';
/* 4️⃣ LLM / analysis – newly added keyword “analyze”. */
} else if (lc.match(/analyze|explain|evaluate|summary|research|paper|analysis|interpret|discussion|assessment|compare|contrast/i)) {
primary_context = 'llm_interaction';
}
return {
primary_context: primary_context,
confidence: 0.75,
secondary_contexts: ['llm_interaction'],
detected_parameters: [],
mock_mode: true,
reason: 'Running in development/fallback mode.'
};
}
async handleOptimizePrompt(args) {
// Optimize a prompt. Three execution paths:
//   (a) mock/dev mode  -> canned result, no network;
//   (b) live           -> optional backend context detection, then /optimize;
//   (c) network failure-> canned result flagged as fallback_mode.
// Non-network errors are re-thrown to the CallTool dispatcher.
if (!args.prompt) throw new Error('Prompt is required');
const manager = new CloudApiKeyManager(this.apiKey, { developmentMode: this.developmentMode });
try {
const validation = await manager.validateApiKey();
if (validation.mock_mode || this.developmentMode) {
// In mock/dev mode, we still need a context for mock generation
const mockContext = args.ai_context || 'human_communication';
const mockGoals = args.goals || ['clarity'];
const mockEnableBayesian = args.enable_bayesian !== false && this.bayesianOptimizationEnabled;
const mockResult = this.generateMockOptimization(args.prompt, mockGoals, mockContext, mockEnableBayesian);
const formatted = this.formatOptimizationResult(mockResult, { detectedContext: mockContext, enableBayesian: mockEnableBayesian });
return { content: [{ type: "text", text: formatted }] };
}
// 1. Detect AI Context from backend (only when the caller did not
// specify one); detection failure is non-fatal and falls back to the
// default context.
let detectedContext = args.ai_context;
if (!detectedContext) {
try {
const contextDetectionResult = await this.callBackendAPI(ENDPOINTS.DETECT_CONTEXT, { prompt: args.prompt });
detectedContext = contextDetectionResult.primary_context;
// console.error is used for diagnostics: stdout carries the MCP
// stdio transport, so logs must go to stderr.
console.error(`Detected AI Context from backend: ${detectedContext}`);
} catch (contextError) {
console.error(`Failed to detect AI context from backend, falling back to default: ${contextError.message}`);
detectedContext = 'human_communication'; // Fallback
}
}
// 2. Call the main optimization endpoint
const enableBayesian = args.enable_bayesian !== false && this.bayesianOptimizationEnabled;
const optimizationPayload = {
prompt: args.prompt,
goals: args.goals || ['clarity'], // Pass original goals, backend will enhance
context: {
ai_context: detectedContext
},
enable_bayesian: enableBayesian,
metadata: {
mcp_version: packageJson.version,
feature_flags: {
bayesian_optimization: this.bayesianOptimizationEnabled,
agui_features: this.aguiFeatures
}
}
};
const result = await this.callBackendAPI(ENDPOINTS.OPTIMIZE, optimizationPayload);
return { content: [{ type: "text", text: this.formatOptimizationResult(result, { detectedContext, enableBayesian }) }] };
} catch (error) {
// Network-looking failures (matched by message substring, as produced
// by callBackendAPI) degrade to a mock result instead of failing.
if (error.message.includes('Network') || error.message.includes('DNS') || error.message.includes('timeout') || error.message.includes('Connection')) {
const fallbackContext = args.ai_context || 'human_communication';
const fallbackEnableBayesian = args.enable_bayesian !== false && this.bayesianOptimizationEnabled;
const fallbackResult = this.generateMockOptimization(args.prompt, args.goals || ['clarity'], fallbackContext, fallbackEnableBayesian);
fallbackResult.fallback_mode = true;
fallbackResult.error_reason = error.message;
const formatted = this.formatOptimizationResult(fallbackResult, { detectedContext: fallbackContext, enableBayesian: fallbackEnableBayesian });
return { content: [{ type: "text", text: formatted }] };
}
throw new Error(`Optimization failed: ${error.message}`);
}
}
async handleGetQuotaStatus() {
const manager = new CloudApiKeyManager(this.apiKey, { developmentMode: this.developmentMode });
const info = await manager.getApiKeyInfo();
return { content: [{ type: "text", text: this.formatQuotaStatus(info) }] };
}
async handleSearchTemplates(args) {
try {
const params = new URLSearchParams({
page: '1',
per_page: Math.min(args.limit || 5, 20).toString(),
sort_by: args.sort_by || 'confidence_score',
sort_order: args.sort_order || 'desc'
});
if (args.query) params.append('query', args.query);
if (args.ai_context) params.append('ai_context', args.ai_context);
if (args.sophistication_level) params.append('sophistication_level', args.sophistication_level);
if (args.complexity_level) params.append('complexity_level', args.complexity_level);
if (args.optimization_strategy) params.append('optimization_strategy', args.optimization_strategy);
const endpoint = `${ENDPOINTS.SEARCH_TEMPLATES}?${params.toString()}`;
const result = await this.callBackendAPI(endpoint, null, 'GET');
const searchResult = {
templates: result.templates || [],
total: result.total || 0,
query: args.query,
ai_context: args.ai_context,
sophistication_level: args.sophistication_level,
complexity_level: args.complexity_level
};
const formatted = this.formatTemplateSearchResults(searchResult, args);
return { content: [{ type: "text", text: formatted }] };
} catch (error) {
console.error(`Template search failed: ${error.message}`);
const fallbackResult = {
templates: [],
total: 0,
message: "Template search is temporarily unavailable.",
error: error.message,
fallback_mode: true
};
const formatted = this.formatTemplateSearchResults(fallbackResult, args);
return { content: [{ type: "text", text: formatted }] };
}
}
async handleGetOptimizationInsights(args) {
// Return Bayesian optimization analytics. Gated on the feature flag;
// on ANY backend error it silently substitutes a canned mock payload.
if (!this.bayesianOptimizationEnabled) {
return { content: [{ type: "text", text: "🧠 Bayesian optimization features are not enabled. Set ENABLE_BAYESIAN_OPTIMIZATION=true to access advanced insights." }] };
}
try {
// Try to get insights from backend
const endpoint = `${ENDPOINTS.ANALYTICS_BAYESIAN_INSIGHTS}?depth=${args.analysis_depth || 'detailed'}&recommendations=${args.include_recommendations !== false}`;
const result = await this.callBackendAPI(endpoint, null, 'GET');
return { content: [{ type: "text", text: this.formatOptimizationInsights(result) }] };
} catch (error) {
// Fallback to mock insights (static values; `error` is intentionally
// not surfaced to the caller here).
const mockInsights = {
bayesian_status: {
optimization_active: true,
total_optimizations: 47,
improvement_rate: '23.5%',
confidence_score: 0.89
},
parameter_insights: {
most_effective_goals: ['clarity', 'technical_accuracy', 'analytical_depth'],
context_performance: {
'code_generation': 0.92,
'llm_interaction': 0.87,
'technical_automation': 0.84
},
optimization_trends: 'Steady improvement in technical contexts'
},
// Recommendations are included unless explicitly disabled.
recommendations: args.include_recommendations !== false ? [
'Focus on technical_accuracy for code generation prompts',
'Combine clarity with analytical_depth for best results',
'Consider using structured_output context for data tasks'
] : []
};
return { content: [{ type: "text", text: this.formatOptimizationInsights(mockInsights) }] };
}
}
async handleGetRealTimeStatus() {
// Report AG-UI real-time capabilities. Gated on the feature flag; on
// ANY backend error it substitutes a canned "available" mock status.
if (!this.aguiFeatures) {
return { content: [{ type: "text", text: "⚡ AG-UI real-time features are not enabled. Set ENABLE_AGUI_FEATURES=true to access real-time optimization capabilities." }] };
}
try {
const result = await this.callBackendAPI(ENDPOINTS.AGUI_STATUS, null, 'GET');
return { content: [{ type: "text", text: this.formatRealTimeStatus(result) }] };
} catch (error) {
// Static mock payload; `error` is intentionally not surfaced.
const mockStatus = {
agui_status: 'available',
streaming_optimization: true,
websocket_support: true,
real_time_analytics: true,
active_optimizations: 3,
average_response_time: '1.2s',
features: {
live_optimization: true,
collaborative_editing: true,
instant_feedback: true,
performance_monitoring: true
}
};
return { content: [{ type: "text", text: this.formatRealTimeStatus(mockStatus) }] };
}
}
async handleDetectAIContext(args) {
// Detect the AI context for a prompt via the backend, with a local
// heuristic fallback for dev mode or any error (missing key, network).
if (!args.prompt) throw new Error('Prompt is required');
// Shared renderer so real, mock, and fallback results format identically.
const formatResult = (result) => {
let output = `# 🧠 AI Context Detection Result\n\n`;
output += `**Primary Context:** ${result.primary_context}\n`;
output += `**Confidence:** ${(result.confidence * 100).toFixed(1)}%\n`;
if (result.secondary_contexts && result.secondary_contexts.length > 0) {
output += `**Secondary Contexts:** ${result.secondary_contexts.join(', ')}\n`;
}
// Defensive: drop malformed parameter entries before rendering names.
const detections = result.detected_parameters ?? [];
const safeDetections = detections.filter(d => d && d.name);
if (safeDetections.length > 0) {
output += `**Detected Parameters:** ${safeDetections.map(d => d.name).join(', ')}\n`;
}
if (result.mock_mode) {
output += `\n⚠️ **Fallback Mode Active:** Using mock data due to development mode or network issues.\n`;
}
return { content: [{ type: "text", text: output }] };
};
try {
const manager = new CloudApiKeyManager(this.apiKey, { developmentMode: this.developmentMode });
const validation = await manager.validateApiKey();
if (validation.mock_mode || this.developmentMode) {
const mockResult = this.generateMockContextDetection(args.prompt);
return formatResult(mockResult);
}
const result = await this.callBackendAPI(ENDPOINTS.DETECT_CONTEXT, { prompt: args.prompt });
return formatResult(result);
} catch (error) {
// Fallback for ANY error during the process (missing key, network, etc.)
const fallbackResult = this.generateMockContextDetection(args.prompt);
return formatResult(fallbackResult);
}
}
async handleCreateTemplate(args) {
try {
const result = await this.callBackendAPI(ENDPOINTS.TEMPLATE.CREATE, args);
let output = `# ✅ Template Created Successfully!\n\n`;
output += `**Title:** ${result.title}\n`;
output += `**ID:** \`${result.id}\`\n`;
output += `**Confidence Score:** ${(result.confidence_score * 100).toFixed(1)}%\n`;
output += `**AI Context:** ${result.ai_context_detected || 'N/A'}\n`;
output += `**Public:** ${result.is_public ? 'Yes' : 'No'}\n`;
output += `\n**Optimized Prompt Preview:**\n\`\`\`\n${result.optimized_prompt.substring(0, 150)}...\n\`\`\`\n`;
return { content: [{ type: "text", text: output }] };
} catch (error) {
throw new Error(`Failed to create template: ${error.message}`);
}
}
async handleGetTemplate(args) {
// Fetch a single template by ID and render its full contents (metadata
// plus both prompt bodies) as markdown. Backend errors are re-wrapped.
if (!args.template_id) throw new Error('Template ID is required');
try {
const result = await this.callBackendAPI(ENDPOINTS.TEMPLATE.GET(args.template_id), null, 'GET');
let output = `# 📄 Template Details\n\n`;
output += `**Title:** ${result.title}\n`;
output += `**ID:** \`${result.id}\`\n`;
output += `**Description:** ${result.description || 'N/A'}\n`;
output += `**AI Context:** ${result.ai_context_detected || 'N/A'}\n`;
output += `**Confidence Score:** ${(result.confidence_score * 100).toFixed(1)}%\n`;
output += `**Public:** ${result.is_public ? 'Yes' : 'No'}\n`;
output += `**Tags:** ${result.tags ? result.tags.join(', ') : 'None'}\n\n`;
output += `**Original Prompt:**\n\`\`\`\n${result.original_prompt}\n\`\`\`\n\n`;
output += `**Optimized Prompt:**\n\`\`\`\n${result.optimized_prompt}\n\`\`\`\n`;
return { content: [{ type: "text", text: output }] };
} catch (error) {
throw new Error(`Failed to retrieve template: ${error.message}`);
}
}
async handleUpdateTemplate(args) {
if (!args.template_id) throw new Error('Template ID is required');
try {
const { template_id, ...updateData } = args;
// Filter out undefined values so we only send fields that are being updated
Object.keys(updateData).forEach(key => updateData[key] === undefined && delete updateData[key]);
const result = await this.callBackendAPI(ENDPOINTS.TEMPLATE.UPDATE(template_id), updateData, 'PATCH'); // PATCH is better for partial updates
let output = `# ✅ Template Updated Successfully!\n\n`;
output += `**ID:** \`${result.id}\`\n`;
output += `**Title:** ${result.title}\n\n`;
output += `Use 'get_template' with the ID to see the full updated template.`;
return { content: [{ type: "text", text: output }] };
} catch (error) {
throw new Error(`Failed to update template: ${error.message}`);
}
}
async _buildUrl(path) {
return `${this.backendUrl}${path}`;
}
async callBackendAPI(endpoint, data, method = 'POST') {
// Low-level JSON call to the backend. Wraps Node's callback-style
// http(s).request in a Promise, authenticates via the `x-api-key`
// header, and maps common socket error codes to friendlier messages
// (these message substrings are what handleOptimizePrompt keys its
// network-fallback detection on).
return new Promise((resolve, reject) => {
// NOTE(review): _buildUrl must return a string synchronously; if it is
// declared async, this un-awaited call would hand a Promise to
// client.request — confirm.
const url = this._buildUrl(endpoint);
const options = {
method: method,
headers: {
'x-api-key': this.apiKey,
'Content-Type': 'application/json',
'User-Agent': `mcp-prompt-optimizer/${packageJson.version}`,
'Accept': 'application/json',
'Connection': 'close'
},
timeout: this.requestTimeout
};
// Pick the transport matching the configured scheme; http is only
// loaded lazily for non-TLS backends (e.g. local development).
const client = this.backendUrl.startsWith('https://') ? https : require('http');
const req = client.request(url, options, (res) => {
let responseData = '';
res.on('data', (chunk) => {
responseData += chunk;
});
res.on('end', () => {
try {
if (res.statusCode >= 200 && res.statusCode < 300) {
const parsed = JSON.parse(responseData);
resolve(parsed);
} else {
// Non-2xx: prefer the backend's structured error detail, fall
// back to the raw body if it isn't JSON.
let errorMessage;
try {
const error = JSON.parse(responseData);
errorMessage = error.detail || error.message || `HTTP ${res.statusCode}`;
} catch {
errorMessage = `HTTP ${res.statusCode}: ${responseData}`;
}
reject(new Error(errorMessage));
}
} catch (parseError) {
// 2xx body that is not valid JSON.
reject(new Error(`Invalid response format: ${parseError.message}`));
}
});
});
// Translate low-level socket errors into user-readable messages.
req.on('error', (error) => {
if (error.code === 'ENOTFOUND') {
reject(new Error(`DNS resolution failed: Cannot resolve ${this.backendUrl.replace(/^https?:\/\//, '')}`));
} else if (error.code === 'ECONNREFUSED') {
reject(new Error(`Connection refused: Backend server may be down`));
} else if (error.code === 'ETIMEDOUT') {
reject(new Error(`Connection timeout: Backend server is not responding`));
} else if (error.code === 'ECONNRESET') {
reject(new Error(`Connection reset: Network instability detected`));
} else {
reject(new Error(`Network error: ${error.message}`));
}
});
// Fired when the socket exceeds `options.timeout`; destroy() aborts
// the in-flight request.
req.on('timeout', () => {
req.destroy();
reject(new Error('Request timeout - backend may be unavailable'));
});
if (method !== 'GET' && data) {
req.write(JSON.stringify(data));
}
req.end();
});
}
formatOptimizationResult(result, context) {
// Render an optimization response (real or mock) as a markdown report.
// `context` carries { detectedContext, enableBayesian } from the caller;
// every section beyond the header is conditional on the response shape.
let output = `# 🎯 Optimized Prompt\n\n${result.optimized_prompt}\n\n`;
output += `**Confidence:** ${(result.confidence_score * 100).toFixed(1)}%\n`;
output += `**AI Context:** ${context.detectedContext}\n`;
if (result.template_saved) {
output += `\n📁 **Template Auto-Save**\n✅ Automatically saved as template (ID: \`${result.template_id}\`)\n*Confidence threshold: >70% required for auto-save*\n`;
}
// At most three similar templates are listed.
if (result.templates_found?.length) {
output += `\n📋 **Similar Templates Found**\nFound **${result.templates_found.length}** similar template(s):\n`;
result.templates_found.slice(0, 3).forEach(t => {
output += `- ${t.title} (${(t.confidence_score * 100).toFixed(1)}% match)\n`;
});
}
if (result.optimization_insights) {
const metrics = result.optimization_insights.improvement_metrics;
if (metrics) {
output += `\n📊 **Optimization Insights**\n`;
if (metrics.clarity_improvement) output += `- Clarity: +${(metrics.clarity_improvement * 100).toFixed(1)}%\n`;
if (metrics.specificity_improvement) output += `- Specificity: +${(metrics.specificity_improvement * 100).toFixed(1)}%\n`;
if (metrics.context_alignment) output += `- Context Alignment: +${(metrics.context_alignment * 100).toFixed(1)}%\n`;
}
if (result.optimization_insights.recommendations?.length) {
output += `\n💡 **Recommendations:**\n`;
result.optimization_insights.recommendations.forEach(rec => {
output += `- ${rec}\n`;
});
}
}
// Add Bayesian insights if available (requires both the response data
// and the caller having requested Bayesian mode).
if (result.bayesian_insights && context.enableBayesian) {
output += `\n🧠 **Bayesian Optimization Insights**\n`;
const bayesian = result.bayesian_insights;
if (bayesian.parameter_optimization) {
output += `**Parameter Tuning:**\n`;
if (bayesian.parameter_optimization.temperature_adjustment) {
output += `- Temperature: ${bayesian.parameter_optimization.temperature_adjustment}\n`;
}
if (bayesian.parameter_optimization.goal_prioritization) {
output += `- Goal Priority: ${bayesian.parameter_optimization.goal_prioritization}\n`;
}
}
if (bayesian.performance_prediction) {
output += `**Performance Prediction:**\n`;
output += `- Expected Improvement: ${bayesian.performance_prediction.expected_improvement}\n`;
output += `- Confidence Interval: ${bayesian.performance_prediction.confidence_interval}\n`;
}
if (bayesian.next_optimization_recommendation) {
output += `**Next Optimization:**\n`;
output += `- Suggested Goals: ${bayesian.next_optimization_recommendation.suggested_goals.join(', ')}\n`;
output += `- Estimated Improvement: ${bayesian.next_optimization_recommendation.estimated_improvement}\n`;
}
}
if (result.fallback_mode) {
output += `\n⚠️ **Fallback Mode Active**\n**Issue:** ${result.error_reason}\n`;
}
output += `\n🔗 **Quick Actions**\n- Dashboard: https://promptoptimizer-blog.vercel.app/dashboard\n- Analytics: https://promptoptimizer-blog.vercel.app/analytics\n`;
return output;
}
formatQuotaStatus(result) {
// Render account/quota info as markdown. Tolerates partial responses:
// missing quota defaults to 0/5000, missing tier to 'explorer'.
let output = `# 📊 Account Status\n\n**Plan:** ${result.tier || 'explorer'}\n`;
const quota = result.quota || {};
if (quota.unlimited) {
output += `**Usage:** 🟢 Unlimited\n`;
} else {
const used = quota.used || 0;
const limit = quota.limit || 5000;
// toFixed returns a string; the >= comparisons below coerce it back
// to a number.
const percentage = limit > 0 ? ((used / limit) * 100).toFixed(1) : 0;
// Traffic-light icon: red at >=90%, yellow at >=75%, green otherwise.
let statusIcon = '🟢';
if (percentage >= 90) statusIcon = '🔴';
else if (percentage >= 75) statusIcon = '🟡';
output += `**Usage:** ${statusIcon} ${used}/${limit} (${percentage}%)\n`;
}
output += `\n## ✨ **Available Features**\n`;
// Backend-reported features, plus locally flagged capabilities.
if (result.features) {
if (result.features.optimization) output += `✅ Prompt Optimization\n`;
if (result.features.template_search) output += `✅ Template Search & Management\n`;
if (result.features.template_auto_save) output += `✅ Template Auto-Save\n`;
if (result.features.optimization_insights) output += `✅ Optimization Insights\n`;
if (this.bayesianOptimizationEnabled) output += `🧠 Bayesian Optimization\n`;
if (this.aguiFeatures) output += `⚡ AG-UI Real-time Features\n`;
}
if (result.mode) {
output += `\n## 🔧 **Mode Status**\n`;
if (result.mode.development) output += `⚙️ Development Mode\n`;
if (result.mode.mock) output += `🎭 Mock Mode\n`;
if (result.mode.fallback) output += `🔄 Fallback Mode\n`;
if (result.mode.offline) output += `📱 Offline Mode\n`;
}
output += `\n## 🔗 **Account Management**\n`;
output += `- Dashboard: https://promptoptimizer-blog.vercel.app/dashboard\n`;
output += `- Analytics: https://promptoptimizer-blog.vercel.app/analytics\n`;
output += `- Upgrade: https://promptoptimizer-blog.vercel.app/pricing\n`;
return output;
}
formatTemplateSearchResults(result, originalArgs) {
// Render template search results as markdown: the search criteria that
// were used, the matches (or an empty-state hint), and a usage guide.
// `originalArgs` is the raw tool-call argument object.
let output = `# 🔍 Template Search Results\n\n`;
if (originalArgs.query || originalArgs.ai_context || originalArgs.sophistication_level) {
output += `**Search Criteria:**\n`;
if (originalArgs.query) output += `- Query: "${originalArgs.query}"\n`;
if (originalArgs.ai_context) output += `- AI Context: ${originalArgs.ai_context}\n`;
if (originalArgs.sophistication_level) output += `- Sophistication: ${originalArgs.sophistication_level}\n`;
if (originalArgs.complexity_level) output += `- Complexity: ${originalArgs.complexity_level}\n`;
output += `\n`;
}
output += `Found **${result.total}** template(s)\n\n`;
if (!result.templates || result.templates.length === 0) {
// Empty state: tailor the hint to whether a query was given.
output += `📭 **No Templates Found**\n`;
if (originalArgs.query) {
output += `Try searching with different keywords or remove filters.\n`;
} else {
output += `You don't have any saved templates yet. Templates are automatically saved when optimization confidence is >70%.\n`;
}
} else {
output += `## 📋 **Template Results**\n`;
result.templates.forEach((t, index) => {
// Defensive formatting: templates may lack scores or prompt bodies.
const confidence = t.confidence_score ? `${(t.confidence_score * 100).toFixed(1)}%` : 'N/A';
const preview = t.optimized_prompt ? t.optimized_prompt.substring(0, 60) + '...' : 'Preview unavailable';
output += `### ${index + 1}. ${t.title}\n`;
output += `- **Confidence:** ${confidence}\n`;
output += `- **ID:** \`${t.id}\`\n`;
output += `- **Preview:** ${preview}\n`;
if (t.ai_context) output += `- **Context:** ${t.ai_context}\n`;
if (t.optimization_goals && t.optimization_goals.length) {
output += `- **Goals:** ${t.optimization_goals.join(', ')}\n`;
}
output += `\n`;
});
output += `## 💡 **Template Usage Guide**\n`;
output += `- Copy prompts for immediate use\n`;
output += `- Use template IDs to reference specific templates\n`;
output += `- High-confidence templates (>80%) are most reliable\n`;
}
if (result.fallback_mode) {
output += `\n⚠️ **Search Temporarily Unavailable**\n${result.message}\n`;
}
return output;
}
formatOptimizationInsights(insights) {
let output = `# 🧠 Bayesian Optimization Insights\n\n`;
if (insights.bayesian_status) {
const status = insights.bayesian_status;
output += `## 📊 **Status Overview**\n`;
output += `- **Status:** ${status.optimization_active ? '🟢 Active' : '🔴 Inactive'}\n`;
output += `- **Total Optimizations:** ${status.total_optimizations}\n`;
output += `- **Improvement Rate:** ${status.improvement_rate}\n`;
output += `- **System Confidence:** ${(status.confidence_score * 100).toFixed(1)}%\n\n`;
}
if (insights.parameter_insights) {
const params = insights.parameter_insights;
output += `## 🎯 **Parameter Analysis**\n`;
if (params.most_effective_goals) {
output += `**Most Effective Goals:**\n`;
params.most_effective_goals.forEach(goal => {
output += `- ${goal}\n`;
});
output += `\n`;
}
if (params.context_performance) {
output += `**Context Performance:**\n`;
Object.entries(params.context_performance).forEach(([context, score]) => {
const percentage = (score * 100).toFixed(1);
const icon = score >= 0.9 ? '🟢' : score >= 0.8 ? '🟡' : '🔴';
output += `- ${context}: ${icon} ${percentage}%\n`;
});
output += `\n`;
}
if (params.optimization_trends) {
output += `**Trends:** ${params.optimization_trends}\n\n`;
}
}
if (insights.recommendations && insights.recommendations.length) {
output += `## 💡 **Optimization Recommendations**\n`;
insights.recommendations.forEach((rec, index) => {
output += `${index + 1}. ${rec}\n`;
});
output += `\n`;
}
output += `## 🔗 **Advanced Analytics**\n`;
output += `- Full Analytics: https://promptoptimizer-blog.vercel.app/analytics\n`;
output += `- Performance Dashboard: https://promptoptimizer-blog.vercel.app/dashboard\n`;
return output;
}
formatRealTimeStatus(status) {
let output = `# ⚡ AG-UI Real-Time Status\n\n`;
output += `## 🚀 **Service Status**\n`;
output += `- **AG-UI Status:** ${status.agui_status === 'available' ? '🟢 Available' : '🔴 Unavailable'}\n`;
output += `- **Streaming Optimization:** ${status.streaming_optimization ? '✅ Enabled' : '❌ Disabled'}\n`;
output += `- **WebSocket Support:** ${status.websocket_support ? '✅ Enabled' : '❌ Disabled'}\n`;
output += `- **Real-time Analytics:** ${status.real_time_analytics ? '✅ Enabled' : '❌ Disabled'}\n\n`;
if (status.active_optimizations !== undefined) {
output += `## 📈 **Current Activity**\n`;
output += `- **Active Optimizations:** ${status.active_optimizations}\n`;
output += `- **Average Response Time:** ${status.average_response_time}\n\n`;
}
if (status.features) {
const features = status.features;
output += `## ⚡ **Available Features**\n`;
if (features.live_optimization) output += `✅ Live Optimization\n`;
if (features.collaborative_editing) output += `✅ Collaborative Editing\n`;
if (features.instant_feedback) output += `✅ Instant Feedback\n`;
if (features.performance_monitoring) output += `✅ Performance Monitoring\n`;
output += `\n`;
}
output += `## 🔗 **Real-Time Access**\n`;
output += `- Live Dashboard: https://promptoptimizer-blog.vercel.app/live\n`;
output += `- WebSocket Endpoint: Available via API\n`;
return output;
}
async run() {
const transport = new StdioServerTransport();
await this.server.connect(transport);
}
}
/**
 * Validate the configured API key, then start the MCP server on stdio.
 *
 * IMPORTANT: with a stdio transport, stdout is reserved for the JSON-RPC
 * protocol stream — stray writes there corrupt client communication. All
 * human-readable banner/status output is therefore routed to stderr via
 * console.error (previously console.log, which wrote to stdout).
 *
 * Exits the process with code 1 when no API key is configured or when
 * validation/startup fails.
 */
async function startValidatedMCPServer() {
  console.error(`🚀 MCP Prompt Optimizer - Professional Cloud Server v${packageJson.version}\n`);
  console.error(`🧠 Bayesian Optimization: ${process.env.ENABLE_BAYESIAN_OPTIMIZATION === 'true' ? 'Enabled' : 'Disabled'}`);
  console.error(`⚡ AG-UI Features: ${process.env.ENABLE_AGUI_FEATURES === 'true' ? 'Enabled' : 'Disabled'}\n`);
  try {
    const apiKey = process.env.OPTIMIZER_API_KEY;
    if (!apiKey) {
      console.error('❌ API key required. Get one at https://promptoptimizer-blog.vercel.app/pricing');
      process.exit(1);
    }
    const manager = new CloudApiKeyManager(apiKey, { developmentMode: process.env.OPTIMIZER_DEV_MODE === 'true' });
    console.error('🔧 Validating API key...\n');
    const validation = await manager.validateAndPrepare();
    console.error('🔧 Starting MCP server...\n');
    const mcpServer = new MCPPromptOptimizer();
    console.error('✅ MCP server ready for connections');
    // Enhanced status display — unlimited plans have no numeric counters.
    const quotaDisplay = validation.quotaStatus.unlimited ?
      'Unlimited' :
      `${validation.quotaStatus.remaining}/${validation.quotaStatus.limit} remaining`;
    console.error(`📊 Plan: ${validation.tier} | Quota: ${quotaDisplay}`);
    if (validation.mode.mock) console.error('🎭 Running in mock mode');
    if (validation.mode.development) console.error('⚙️ Development mode active');
    if (validation.mode.fallback) console.error('🔄 Fallback mode active');
    if (validation.mode.offline) console.error('📱 Offline mode active');
    await mcpServer.run();
  } catch (error) {
    console.error(`❌ Failed to start MCP server: ${error.message}`);
    process.exit(1);
  }
}
// Entry point when executed directly (skipped when require()d as a library).
if (require.main === module) {
  // Attach a rejection handler so any error escaping the async startup
  // function fails the process loudly instead of becoming an unhandled
  // promise rejection.
  startValidatedMCPServer().catch((error) => {
    console.error(`❌ Fatal error: ${error.message}`);
    process.exit(1);
  });
}
module.exports = { MCPPromptOptimizer };