UNPKG

@hivetechs/hive-ai

Version:

Real-time streaming AI consensus platform with HTTP+SSE MCP integration for Claude Code, VS Code, Cursor, and Windsurf - powered by OpenRouter's unified API

1,096 lines 46.8 kB
/**
 * Model Context Protocol (MCP) Server for Hive AI - HTTP+SSE Transport
 *
 * This server exposes Hive AI consensus, analytics, and profile management
 * capabilities to AI agents and tools through modern HTTP+SSE streaming.
 *
 * Supports all major IDEs: Claude Code, Cursor, Windsurf, VS Code
 * MCP Specification: https://modelcontextprotocol.io/
 */
import express from 'express';
import { spawn } from 'child_process';
import { v4 as uuidv4 } from 'uuid';
import { setConfig } from '../storage/unified-database.js';
import { getCurrentVersion } from '../utils/version-utils.js';
import { MCPPortManager } from '../tools/mcp-port-manager.js';
/**
 * Hive AI MCP Server Manager - HTTP+SSE Transport (Stateless)
 *
 * Stateless MCP server following 2025-03-26 specification.
 * No persistent sessions - each request is handled independently.
 *
 * Lifecycle: construct -> start() (which calls initialize()) -> stop().
 * Tool calls are executed either by shelling out to the `hive` CLI
 * (executeHiveCommand / executeHiveCommandWithStreaming) or, for consensus
 * tools, by invoking the in-process consensus engine with SSE streaming
 * (executeStreamingConsensus).
 */
class HiveAIMCPServer {
    // Express application instance; created lazily in initialize().
    httpServer = null;
    // Node http.Server returned by app.listen(); kept so stop() can close() it.
    serverInstance = null;
    // TCP port the server listens on; may be reassigned by the port manager.
    port;
    // True once listen() succeeds; cleared by stop().
    isRunning = false;
    // MCPPortManager instance handling port selection, validation and conflicts.
    portManager;
    /**
     * @param {number} [port] - Preferred listen port. Falls back to 8765 when
     *   omitted (or when 0/falsy is passed, since `||` is used below).
     */
    constructor(port) {
        // Use higher port range to avoid Windows Firewall issues and corporate restrictions
        // Ports below 1024 often require admin, ports 3000-5000 may be blocked by corporate firewalls
        this.port = port || 8765; // Default to higher, less restricted port
        this.portManager = new MCPPortManager({
            defaultPort: this.port,
            portRange: { min: 8765, max: 8775 } // Higher port range for enterprise compatibility
        });
    }
    /**
     * Validate Origin header for security (2025-03-26 MCP spec requirement).
     *
     * @param {string} origin - Raw Origin header value.
     * @returns {boolean} true when the origin is localhost, a known Claude
     *   host, or a known IDE webview host; false for anything else or for
     *   strings `new URL()` cannot parse.
     */
    isValidOrigin(origin) {
        try {
            const url = new URL(origin);
            // Allow localhost and 127.0.0.1 on any port for local development
            if (url.hostname === 'localhost' || url.hostname === '127.0.0.1') {
                return true;
            }
            // Allow known Claude Desktop origins
            // NOTE(review): substring match (`includes`) also accepts hostnames
            // like "claude.ai.evil.com" - confirm this is intentional.
            if (url.hostname.includes('claude.ai') || url.hostname.includes('anthropic.com')) {
                return true;
            }
            // Allow other known IDE origins (VS Code, Cursor, etc.)
            const allowedHosts = [
                'vscode-webview.net',
                'vscode-app.com',
                'cursor.sh',
                'windsurf.app'
            ];
            return allowedHosts.some(host => url.hostname.includes(host));
        }
        catch {
            // Unparseable Origin header -> reject.
            return false;
        }
    }
    /**
     * Initialize the HTTP+SSE MCP server.
     *
     * Idempotent: a second call is a no-op once the Express app exists.
     * Does not start listening - that happens in start().
     */
    async initialize() {
        if (this.httpServer) {
            return; // Already initialized
        }
        this.httpServer = express();
        this.setupHTTPServer();
        this.setupErrorHandling();
    }
    /**
     * Set up HTTP server with MCP endpoints following 2025-03-26 specification.
     *
     * Registers, in order: origin-validation/CORS middleware, preflight
     * handler, body parsers, the static MCP_TOOLS catalog, and the
     * /.well-known/mcp, /mcp and /health routes.
     */
    setupHTTPServer() {
        if (!this.httpServer)
            return;
        // Security middleware - MUST validate Origin header (2025-03-26 spec)
        this.httpServer.use((req, res, next) => {
            const origin = req.headers.origin;
            // Validate Origin header to prevent DNS rebinding attacks.
            // Requests with NO Origin header are allowed through.
            if (origin && !this.isValidOrigin(origin)) {
                return res.status(403).json({
                    error: 'Invalid origin',
                    message: 'Origin validation failed for security'
                });
            }
            // Set security headers
            res.setHeader('X-Content-Type-Options', 'nosniff');
            res.setHeader('X-Frame-Options', 'DENY');
            // NOTE(review): 'http://localhost:*' is not a valid
            // Access-Control-Allow-Origin value (wildcard ports are not part of
            // CORS) - browsers will ignore it. Confirm intended fallback.
            res.setHeader('Access-Control-Allow-Origin', origin || 'http://localhost:*');
            res.setHeader('Access-Control-Allow-Methods', 'POST, GET, OPTIONS');
            res.setHeader('Access-Control-Allow-Headers', 'Content-Type, Authorization');
            next();
        });
        // Handle preflight requests
        this.httpServer.options('*', (req, res) => {
            res.status(200).end();
        });
        // Core middleware
        this.httpServer.use(express.json({ limit: '10mb' }));
        this.httpServer.use(express.text({ type: 'text/event-stream', limit: '10mb' }));
        // MCP Tool Definitions - All 18 Hive AI tools.
        // Served verbatim by the tools/list method; names map to CLI argv in
        // buildHiveCommand().
        const MCP_TOOLS = [
            {
                name: 'hive_test',
                description: 'Test MCP connectivity and verify Hive AI is working properly',
                inputSchema: {
                    type: 'object',
                    properties: {
                        message: { type: 'string', description: 'Test message to echo back' }
                    },
                    required: ['message']
                }
            },
            {
                name: 'hive_setup',
                description: 'Quick setup for Hive AI (configure license and API key)',
                inputSchema: {
                    type: 'object',
                    properties: {
                        license_key: { type: 'string', description: 'Your Hive AI license key' },
                        openrouter_api_key: { type: 'string', description: 'Your OpenRouter API key (starts with sk-or-)' }
                    },
                    required: ['license_key', 'openrouter_api_key']
                }
            },
            {
                name: 'hive_status',
                description: 'Check Hive AI configuration status and system health',
                inputSchema: { type: 'object', properties: {} }
            },
            {
                name: 'hive_consensus',
                description: 'Run 4-stage AI consensus on a question - equivalent to "hive your-question" in CLI',
                inputSchema: {
                    type: 'object',
                    properties: {
                        question: { type: 'string', description: 'The question to analyze through consensus' },
                        profile: {
                            type: 'string',
                            enum: ['Consensus_Elite', 'Consensus_Balanced', 'Consensus_Speed', 'Consensus_Cost'],
                            default: 'Consensus_Balanced',
                            description: 'Consensus profile to use'
                        }
                    },
                    required: ['question']
                }
            },
            {
                name: 'hive',
                description: 'Ask Hive AI anything - just like "hive question" in CLI. Claude will auto-use this when you say "hive [question]"',
                inputSchema: {
                    type: 'object',
                    properties: {
                        question: { type: 'string', description: 'Any question you want to ask Hive AI' }
                    },
                    required: ['question']
                }
            },
            {
                name: 'hive_config_apply',
                description: 'Apply a YAML or JSON configuration file to Hive AI - perfect for Infrastructure as Code workflows',
                inputSchema: {
                    type: 'object',
                    properties: {
                        file_path: { type: 'string', description: 'Path to the YAML or JSON configuration file' },
                        environment: {
                            type: 'string',
                            enum: ['development', 'staging', 'production'],
                            description: 'Target environment for the configuration'
                        }
                    },
                    required: ['file_path']
                }
            },
            {
                name: 'hive_config_export',
                description: 'Export current Hive AI configuration as YAML or JSON for Infrastructure as Code',
                inputSchema: {
                    type: 'object',
                    properties: {
                        format: {
                            type: 'string',
                            enum: ['yaml', 'json'],
                            default: 'yaml',
                            description: 'Output format for the configuration'
                        },
                        include_credentials: {
                            type: 'boolean',
                            default: false,
                            description: 'Whether to include API keys (use with caution)'
                        }
                    }
                }
            },
            {
                name: 'hive_config_validate',
                description: 'Validate a Hive AI configuration file before applying it',
                inputSchema: {
                    type: 'object',
                    properties: {
                        file_path: { type: 'string', description: 'Path to the configuration file to validate' }
                    },
                    required: ['file_path']
                }
            },
            {
                name: 'hive_config_template',
                description: 'Generate configuration templates for different deployment scenarios',
                inputSchema: {
                    type: 'object',
                    properties: {
                        type: {
                            type: 'string',
                            enum: ['basic', 'enterprise', 'development', 'ci-cd'],
                            default: 'basic',
                            description: 'Type of configuration template to generate'
                        }
                    }
                }
            },
            {
                name: 'hive_environments',
                description: 'Manage multiple Hive AI environments (dev, staging, production) with different configurations',
                inputSchema: {
                    type: 'object',
                    properties: {
                        action: {
                            type: 'string',
                            enum: ['list', 'create', 'switch', 'delete', 'clone'],
                            default: 'list',
                            description: 'Environment management action'
                        },
                        environment_name: { type: 'string', description: 'Name of the environment (required for create, switch, delete)' },
                        source_environment: { type: 'string', description: 'Source environment for cloning' }
                    }
                }
            },
            {
                name: 'hive_models',
                description: 'Explore and manage 320+ AI models from 55+ providers available through Hive AI',
                inputSchema: {
                    type: 'object',
                    properties: {
                        action: {
                            type: 'string',
                            enum: ['list', 'search', 'info', 'benchmark', 'compare'],
                            default: 'list',
                            description: 'Model management action'
                        },
                        query: { type: 'string', description: 'Search query or model name' },
                        provider: { type: 'string', description: 'Filter by provider (e.g., openai, anthropic, google)' }
                    }
                }
            },
            {
                name: 'hive_templates',
                description: 'Access expert templates for coding, research, creative writing, and specialized tasks',
                inputSchema: {
                    type: 'object',
                    properties: {
                        action: {
                            type: 'string',
                            enum: ['list', 'use', 'create', 'edit', 'delete'],
                            default: 'list',
                            description: 'Template management action'
                        },
                        category: {
                            type: 'string',
                            enum: ['coding', 'research', 'creative', 'business', 'analysis'],
                            description: 'Template category'
                        },
                        template_name: { type: 'string', description: 'Name of the template' }
                    }
                }
            },
            {
                name: 'hive_cost',
                description: 'Monitor and optimize AI usage costs with budget alerts and spending analytics',
                inputSchema: {
                    type: 'object',
                    properties: {
                        action: {
                            type: 'string',
                            enum: ['summary', 'breakdown', 'budget', 'optimize', 'forecast'],
                            default: 'summary',
                            description: 'Cost management action'
                        },
                        timeframe: {
                            type: 'string',
                            enum: ['day', 'week', 'month', 'quarter'],
                            default: 'month',
                            description: 'Time period for cost analysis'
                        }
                    }
                }
            },
            {
                name: 'hive_backup',
                description: 'Create and manage backups of your Hive AI configurations, profiles, and data',
                inputSchema: {
                    type: 'object',
                    properties: {
                        action: {
                            type: 'string',
                            enum: ['create', 'restore', 'list', 'delete', 'schedule'],
                            default: 'create',
                            description: 'Backup management action'
                        },
                        backup_name: { type: 'string', description: 'Name for the backup' },
                        include_data: {
                            type: 'boolean',
                            default: true,
                            description: 'Include conversation data in backup'
                        }
                    }
                }
            },
            {
                name: 'hive_health',
                description: 'Monitor system health, API connectivity, and performance metrics',
                inputSchema: {
                    type: 'object',
                    properties: {
                        action: {
                            type: 'string',
                            enum: ['check', 'detailed', 'history', 'alerts'],
                            default: 'check',
                            description: 'Health monitoring action'
                        },
                        component: {
                            type: 'string',
                            enum: ['api', 'database', 'models', 'all'],
                            default: 'all',
                            description: 'System component to check'
                        }
                    }
                }
            },
            {
                name: 'hive_performance',
                description: 'Analyze performance metrics, identify bottlenecks, and optimize AI workflows',
                inputSchema: {
                    type: 'object',
                    properties: {
                        action: {
                            type: 'string',
                            enum: ['analyze', 'benchmark', 'compare', 'optimize', 'report'],
                            default: 'analyze',
                            description: 'Performance analysis action'
                        },
                        metric: {
                            type: 'string',
                            enum: ['speed', 'accuracy', 'cost', 'reliability'],
                            description: 'Performance metric to focus on'
                        }
                    }
                }
            },
            {
                name: 'hive_reports',
                description: 'Generate comprehensive business intelligence reports and executive dashboards',
                inputSchema: {
                    type: 'object',
                    properties: {
                        action: {
                            type: 'string',
                            enum: ['generate', 'schedule', 'export', 'dashboard'],
                            default: 'generate',
                            description: 'Reporting action'
                        },
                        report_type: {
                            type: 'string',
                            enum: ['usage', 'cost', 'performance', 'roi', 'executive'],
                            default: 'usage',
                            description: 'Type of report to generate'
                        },
                        format: {
                            type: 'string',
                            enum: ['pdf', 'csv', 'json', 'html'],
                            default: 'pdf',
                            description: 'Report output format'
                        }
                    }
                }
            },
            {
                name: 'hive_intelligence',
                description: 'Access advanced AI capabilities including pattern recognition, trend analysis, and predictive insights',
                inputSchema: {
                    type: 'object',
                    properties: {
                        action: {
                            type: 'string',
                            enum: ['analyze', 'predict', 'optimize', 'insights', 'patterns'],
                            default: 'analyze',
                            description: 'Intelligence analysis action'
                        },
                        data_source: { type: 'string', description: 'Data source or topic to analyze' },
                        analysis_type: {
                            type: 'string',
                            enum: ['trends', 'patterns', 'anomalies', 'predictions'],
                            default: 'trends',
                            description: 'Type of analysis to perform'
                        }
                    }
                }
            }
        ];
        // Service Discovery: /.well-known/mcp endpoint (2025 best practice)
        this.httpServer.get('/.well-known/mcp', (req, res) => {
            res.json({
                protocol: 'mcp',
                version: '2025-03-26',
                capabilities: { tools: { listChanged: true }, resources: {}, prompts: {}, logging: {} },
                endpoints: { mcp: '/mcp', health: '/health' },
                transport: 'streamable-http',
                streaming: true,
                description: 'Hive AI MCP Server - Real-time streaming consensus platform'
            });
        });
        // MCP Streamable HTTP endpoint (2025-03-26 spec) - Stateless
        // Main MCP endpoint supporting both POST and GET
        this.httpServer.all('/mcp', async (req, res) => {
            try {
                // Generate request ID for tracking (stateless)
                const requestId = req.headers['mcp-request-id'] || uuidv4();
                // Set required headers for 2025-03-26 spec (stateless)
                res.setHeader('Mcp-Request-Id', requestId);
                res.setHeader('Cache-Control', 'no-cache');
                // Handle GET request for SSE streaming (2025-03-26 spec) - Stateless
                if (req.method === 'GET') {
                    // Set up Server-Sent Events for streaming (stateless)
                    res.setHeader('Content-Type', 'text/event-stream');
                    res.setHeader('Connection', 'keep-alive');
                    res.setHeader('Access-Control-Allow-Credentials', 'true');
                    // Send initial connection event (stateless)
                    let eventId = 1;
                    res.write(`id: ${eventId}\n`);
                    res.write(`event: connect\n`);
                    res.write(`data: {"type":"connection","requestId":"${requestId}","timestamp":${Date.now()}}\n\n`);
                    // Keep connection alive with heartbeat (stateless)
                    const heartbeat = setInterval(() => {
                        if (!res.destroyed) {
                            eventId++;
                            res.write(`id: ${eventId}\n`);
                            res.write(`event: heartbeat\n`);
                            res.write(`data: {"type":"heartbeat","timestamp":${Date.now()}}\n\n`);
                        }
                        else {
                            clearInterval(heartbeat);
                        }
                    }, 30000); // 30 second heartbeat
                    // Handle client disconnect (stateless)
                    req.on('close', () => {
                        clearInterval(heartbeat);
                    });
                    return; // Keep connection open for streaming
                }
                // Handle initialization
                // NOTE(review): non-GET branches read req.body.method directly;
                // a POST without a JSON body would throw here and be mapped to
                // the -32603 internal error below.
                if (req.body.method === 'initialize') {
                    const response = {
                        jsonrpc: '2.0',
                        id: req.body.id,
                        result: {
                            protocolVersion: '2025-03-26',
                            capabilities: { tools: { listChanged: true }, resources: {}, prompts: {}, logging: {} },
                            serverInfo: { name: 'hive-ai', version: await getCurrentVersion() }
                        }
                    };
                    res.json(response);
                    return;
                }
                // Handle tools/list
                if (req.body.method === 'tools/list') {
                    const response = {
                        jsonrpc: '2.0',
                        id: req.body.id,
                        result: { tools: MCP_TOOLS }
                    };
                    res.json(response);
                    return;
                }
                // Handle tool calls
                if (req.body.method === 'tools/call') {
                    const { name, arguments: args } = req.body.params;
                    // Check if client accepts streaming
                    const acceptHeader = req.headers.accept || '';
                    const supportsStreaming = acceptHeader.includes('text/event-stream');
                    // For streaming tools, use real-time consensus engine
                    if (supportsStreaming && ['hive_consensus', 'hive'].includes(name)) {
                        await this.executeStreamingConsensus(name, args, res, req.body.id);
                        return;
                    }
                    // For non-streaming tools, use standard JSON response
                    try {
                        const result = await this.executeHiveCommand(name, args);
                        const response = {
                            jsonrpc: '2.0',
                            id: req.body.id,
                            result: {
                                content: [
                                    { type: 'text', text: result || 'Command executed successfully' }
                                ]
                            }
                        };
                        res.json(response);
                    }
                    catch (error) {
                        const errorResponse = {
                            jsonrpc: '2.0',
                            id: req.body.id,
                            error: {
                                code: -32000,
                                message: `Tool execution failed: ${error instanceof Error ? error.message : String(error)}`
                            }
                        };
                        res.status(500).json(errorResponse);
                    }
                    return;
                }
                // Handle unknown methods
                const errorResponse = {
                    jsonrpc: '2.0',
                    id: req.body.id,
                    error: { code: -32601, message: `Method not found: ${req.body.method}` }
                };
                res.status(404).json(errorResponse);
            }
            catch (error) {
                const errorResponse = {
                    jsonrpc: '2.0',
                    id: req.body.id || null,
                    error: {
                        code: -32603,
                        message: `Internal error: ${error instanceof Error ? error.message : String(error)}`
                    }
                };
                res.status(500).json(errorResponse);
            }
        });
        // Health check endpoint (stateless)
        this.httpServer.get('/health', async (req, res) => {
            const version = await getCurrentVersion();
            res.json({
                status: 'healthy',
                server: 'hive-ai-mcp',
                version,
                transport: 'streamable-http',
                port: this.port,
                stateless: true,
                timestamp: new Date().toISOString()
            });
        });
    }
    /**
     * Execute Hive CLI command with real-time SSE streaming.
     *
     * Spawns the `hive` binary and relays stdout/stderr chunks to the client
     * as JSON-RPC notification events over SSE, ending with a
     * notifications/result event carrying the exit code.
     *
     * @param {string} toolName - MCP tool name (mapped via buildHiveCommand).
     * @param {object} args - Tool arguments.
     * @param {import('express').Response} res - Response to stream into.
     * @returns {Promise<void>} resolves on process close, rejects on spawn error.
     */
    async executeHiveCommandWithStreaming(toolName, args, res) {
        return new Promise((resolve, reject) => {
            const command = this.buildHiveCommand(toolName, args);
            const childProcess = spawn('hive', command, {
                stdio: ['pipe', 'pipe', 'pipe'],
                env: { ...process.env, FORCE_COLOR: '1' }
            });
            let output = '';
            // Set up SSE headers for streaming
            res.writeHead(200, {
                'Content-Type': 'text/event-stream',
                'Cache-Control': 'no-cache',
                'Connection': 'keep-alive',
                'Access-Control-Allow-Origin': '*',
                'Access-Control-Allow-Headers': 'Cache-Control'
            });
            // Send initial SSE event
            res.write(`data: ${JSON.stringify({
                jsonrpc: '2.0',
                method: 'notifications/progress',
                params: { progress: 0, message: 'Starting Hive AI command execution...' }
            })}\n\n`);
            // Real-time stdout streaming
            childProcess.stdout.on('data', (data) => {
                const chunk = data.toString();
                output += chunk;
                // Send real-time progress via SSE
                res.write(`data: ${JSON.stringify({
                    jsonrpc: '2.0',
                    method: 'notifications/message',
                    params: { type: 'stdout', content: chunk }
                })}\n\n`);
            });
            // Real-time stderr streaming
            childProcess.stderr.on('data', (data) => {
                const chunk = data.toString();
                output += chunk;
                // Send real-time progress via SSE
                res.write(`data: ${JSON.stringify({
                    jsonrpc: '2.0',
                    method: 'notifications/message',
                    params: { type: 'stderr', content: chunk }
                })}\n\n`);
            });
            // Handle completion
            childProcess.on('close', (code) => {
                // Send final result via SSE
                res.write(`data: ${JSON.stringify({
                    jsonrpc: '2.0',
                    method: 'notifications/result',
                    params: { success: code === 0, output: output, exitCode: code }
                })}\n\n`);
                res.end();
                resolve();
            });
            childProcess.on('error', (error) => {
                res.write(`data: ${JSON.stringify({
                    jsonrpc: '2.0',
                    method: 'notifications/error',
                    params: { error: error.message }
                })}\n\n`);
                res.end();
                reject(error);
            });
        });
    }
    /**
     * Execute Hive CLI command without streaming.
     *
     * @param {string} toolName - MCP tool name (mapped via buildHiveCommand).
     * @param {object} args - Tool arguments.
     * @returns {Promise<string>} combined stdout+stderr on exit code 0;
     *   rejects with an Error embedding the exit code and output otherwise.
     */
    async executeHiveCommand(toolName, args) {
        return new Promise((resolve, reject) => {
            const command = this.buildHiveCommand(toolName, args);
            const childProcess = spawn('hive', command, {
                stdio: ['pipe', 'pipe', 'pipe'],
                env: { ...process.env, FORCE_COLOR: '1' }
            });
            let output = '';
            childProcess.stdout.on('data', (data) => {
                output += data.toString();
            });
            childProcess.stderr.on('data', (data) => {
                output += data.toString();
            });
            childProcess.on('close', (code) => {
                if (code === 0) {
                    resolve(output);
                }
                else {
                    reject(new Error(`Command failed with exit code ${code}: ${output}`));
                }
            });
            childProcess.on('error', (error) => {
                reject(error);
            });
        });
    }
    /**
     * Build Hive CLI command from tool name and arguments.
     *
     * @param {string} toolName - MCP tool name.
     * @param {object} args - Tool arguments; only selected fields are used.
     * @returns {string[]} argv array for spawn('hive', ...). Unknown tool
     *   names fall back to passing args.question as a bare argument.
     */
    buildHiveCommand(toolName, args) {
        const commandMap = {
            'hive_test': ['--version'],
            'hive_setup': ['setup'],
            'hive_status': ['status'],
            'hive_consensus': ['consensus', args.question || ''],
            'hive': [args.question || ''],
            'hive_config_apply': ['config', 'apply', args.file_path || ''],
            'hive_config_export': ['config', 'export', args.format || 'yaml'],
            'hive_config_validate': ['config', 'validate', args.file_path || ''],
            'hive_config_template': ['config', 'template', args.type || 'basic'],
            'hive_environments': ['environments', args.action || 'list'],
            'hive_models': ['models', args.action || 'list'],
            'hive_templates': ['templates', args.action || 'list'],
            'hive_cost': ['cost', args.action || 'summary'],
            'hive_backup': ['backup', args.action || 'create'],
            'hive_health': ['health', args.action || 'check'],
            'hive_performance': ['performance', args.action || 'analyze'],
            'hive_reports': ['reports', args.action || 'generate'],
            'hive_intelligence': ['intelligence', args.action || 'analyze']
        };
        return commandMap[toolName] || [args.question || ''];
    }
    /**
     * Execute streaming consensus using direct engine (real-time SSE).
     *
     * Runs the in-process consensus pipeline and streams the final result to
     * the client; on any failure writes a JSON-RPC error event and ends the
     * stream.
     *
     * @param {string} toolName - 'hive_consensus' or 'hive' (currently unused
     *   inside the body - both run the same pipeline).
     * @param {object} args - Tool arguments; args.question is consumed.
     * @param {import('express').Response} res - Response to stream into.
     * @param {*} requestId - JSON-RPC id echoed back in events.
     */
    async executeStreamingConsensus(toolName, args, res, requestId) {
        try {
            // Set up SSE headers for streaming
            res.writeHead(200, {
                'Content-Type': 'text/event-stream',
                'Cache-Control': 'no-cache',
                'Connection': 'keep-alive',
                'Access-Control-Allow-Origin': '*',
                'Access-Control-Allow-Headers': 'Cache-Control'
            });
            // Send initial response with request ID
            const initialResponse = {
                jsonrpc: '2.0',
                id: requestId,
                method: 'notifications/started',
                params: {
                    message: 'Starting real-time consensus streaming...',
                    requestId: requestId
                }
            };
            res.write(`data: ${JSON.stringify(initialResponse)}\n\n`);
            // Import consensus engine (lazy: avoids loading it at server start)
            const { runConsensusPipeline } = await import('../tools/enhanced-consensus-engine.js');
            const { createConversation } = await import('../storage/unified-database.js');
            // Create conversation for tracking
            const conversationId = await createConversation('mcp-streaming', args.question || '');
            // Set up streaming callbacks
            // NOTE(review): this `callbacks` object is never passed to
            // runConsensusPipeline below, so per-stage events are not emitted -
            // confirm whether the engine's 'sse' outputFormat supersedes it or
            // whether callbacks should be forwarded.
            const callbacks = {
                onStageStart: (stageName, model) => {
                    res.write(`data: ${JSON.stringify({
                        jsonrpc: '2.0',
                        method: 'notifications/stage-start',
                        params: { stage: stageName, model: model || 'unknown', requestId }
                    })}\n\n`);
                },
                onStageChunk: (stage, chunk, totalContent) => {
                    res.write(`data: ${JSON.stringify({
                        jsonrpc: '2.0',
                        method: 'notifications/chunk',
                        params: { stage, chunk, totalContent, requestId }
                    })}\n\n`);
                },
                onStageComplete: (stage, result) => {
                    res.write(`data: ${JSON.stringify({
                        jsonrpc: '2.0',
                        method: 'notifications/stage-complete',
                        params: { stage, result: result.answer, duration: result.duration, requestId }
                    })}\n\n`);
                },
                onPipelineProgress: (currentStage, totalStages, stageName) => {
                    res.write(`data: ${JSON.stringify({
                        jsonrpc: '2.0',
                        method: 'notifications/progress',
                        params: {
                            progress: Math.round((currentStage / totalStages) * 100),
                            currentStage,
                            totalStages,
                            stageName,
                            requestId
                        }
                    })}\n\n`);
                },
                onComplete: (finalResult, allStages) => {
                    res.write(`data: ${JSON.stringify({
                        jsonrpc: '2.0',
                        id: requestId,
                        result: { content: [{ type: 'text', text: finalResult }] }
                    })}\n\n`);
                    res.end();
                },
                onError: (stage, error) => {
                    res.write(`data: ${JSON.stringify({
                        jsonrpc: '2.0',
                        id: requestId,
                        error: { code: -32000, message: `Stage ${stage} failed: ${error.message}` }
                    })}\n\n`);
                    res.end();
                }
            };
            // Run consensus pipeline with SSE output format for MCP streaming
            try {
                const finalResult = await runConsensusPipeline(args.question || '', conversationId, undefined, {
                    outputFormat: 'sse',
                    enableProgress: true,
                    showTokenProgress: true
                });
                // Send completion response
                res.write(`data: ${JSON.stringify({
                    jsonrpc: '2.0',
                    id: requestId,
                    result: { content: [{ type: 'text', text: finalResult }] }
                })}\n\n`);
                res.end();
            }
            catch (consensusError) {
                throw consensusError; // Re-throw to be caught by outer catch block
            }
        }
        catch (error) {
            const errorResponse = {
                jsonrpc: '2.0',
                id: requestId,
                error: {
                    code: -32000,
                    message: `Streaming consensus failed: ${error instanceof Error ? error.message : String(error)}`
                }
            };
            res.write(`data: ${JSON.stringify(errorResponse)}\n\n`);
            res.end();
        }
    }
    /**
     * Set up error handling for the HTTP server.
     *
     * Installs PROCESS-WIDE handlers (SIGINT/SIGTERM graceful shutdown,
     * uncaughtException/unhandledRejection -> stop then exit(1)).
     * NOTE(review): these handlers accumulate if initialize() is invoked on
     * multiple instances within one process.
     */
    setupErrorHandling() {
        if (!this.httpServer)
            return;
        // Handle various exit signals
        process.on('SIGINT', async () => {
            console.error('[MCP Server] Received SIGINT, shutting down gracefully');
            await this.stop();
            process.exit(0);
        });
        process.on('SIGTERM', async () => {
            console.error('[MCP Server] Received SIGTERM, shutting down gracefully');
            await this.stop();
            process.exit(0);
        });
        // Handle uncaught exceptions
        process.on('uncaughtException', (error) => {
            console.error('[MCP Server] Uncaught exception:', error);
            this.stop().then(() => process.exit(1));
        });
        process.on('unhandledRejection', (reason, promise) => {
            console.error('[MCP Server] Unhandled rejection at:', promise, 'reason:', reason);
            this.stop().then(() => process.exit(1));
        });
    }
    /**
     * Start the HTTP+SSE MCP server.
     *
     * Seeds the knowledge base (best-effort), initializes the Express app,
     * picks/validates a port via the port manager, binds to 127.0.0.1 with
     * one automatic retry on EADDRINUSE, then persists the final port and
     * transport settings. Rejects (and rethrows) on any startup failure.
     */
    async start() {
        try {
            console.error('[MCP Server] Initializing HTTP+SSE server...');
            // Refresh knowledge base and clear any cached data for new versions
            try {
                const { ensureKnowledgeBaseSeeded } = await import('../tools/hiveai/knowledge-base-seeder.js');
                await ensureKnowledgeBaseSeeded();
            }
            catch (error) {
                // Silent fail - not critical for server startup
            }
            // Initialize the server first
            await this.initialize();
            if (!this.httpServer) {
                throw new Error('Failed to initialize HTTP server');
            }
            // Use port manager to find the best port
            const { port, reason } = await this.portManager.getBestPort();
            this.port = port;
            console.error(`[MCP Server] Selected port ${this.port} (${reason})`);
            console.error(`[MCP Server] Starting server on port ${this.port}...`);
            // Validate port before starting
            const validation = await this.portManager.validatePort(this.port);
            if (!validation.valid) {
                throw new Error(`Port validation failed: ${validation.error}`);
            }
            // Start the HTTP server with proper promise handling
            await new Promise((resolve, reject) => {
                // MUST bind only to localhost (2025-03-26 MCP spec security requirement)
                this.serverInstance = this.httpServer.listen(this.port, '127.0.0.1', () => {
                    this.isRunning = true;
                    console.error(`🐝 Hive AI MCP Server (HTTP+SSE) running on port ${this.port}`);
                    console.error(`🌊 SSE Streaming enabled for real-time consensus output`);
                    console.error(`🔗 MCP endpoint: http://localhost:${this.port}/mcp`);
                    console.error(`❤️ Health check: http://localhost:${this.port}/health`);
                    console.error(`🔧 Ready for IDE integration`);
                    resolve();
                });
                this.serverInstance.on('error', async (err) => {
                    console.error(`[MCP Server] Startup error:`, err);
                    // Handle port conflicts automatically (bulletproof reliability)
                    if (err.code === 'EADDRINUSE') {
                        try {
                            console.error(`[MCP Server] Port ${this.port} in use, finding alternative...`);
                            const alternativePort = await this.portManager.findAvailablePort();
                            this.port = alternativePort;
                            console.error(`[MCP Server] Retrying on port ${this.port}...`);
                            // Retry with new port
                            this.serverInstance = this.httpServer.listen(this.port, '127.0.0.1', () => {
                                this.isRunning = true;
                                console.error(`🐝 Hive AI MCP Server (HTTP+SSE) running on port ${this.port}`);
                                console.error(`🌊 SSE Streaming enabled for real-time consensus output`);
                                console.error(`🔗 MCP endpoint: http://localhost:${this.port}/mcp`);
                                console.error(`❤️ Health check: http://localhost:${this.port}/health`);
                                console.error(`🔧 Ready for IDE integration`);
                                resolve();
                            });
                            this.serverInstance.on('error', (retryErr) => {
                                reject(retryErr);
                            });
                        }
                        catch (portError) {
                            reject(new Error(`All ports in range are in use: ${portError instanceof Error ? portError.message : String(portError)}`));
                        }
                    }
                    else {
                        reject(err);
                    }
                });
                // Add timeout with automatic retry
                // NOTE(review): this timer is never cleared on successful
                // startup; after 10s it logs a misleading "retrying" message and
                // calls reject() (a no-op once resolved) while keeping the event
                // loop alive - consider clearTimeout on resolve.
                setTimeout(() => {
                    console.error(`[MCP Server] Startup timeout, retrying...`);
                    reject(new Error('Server startup timeout - will retry'));
                }, 10000); // Increased timeout for reliability
            });
            // Store the final port configuration in database
            await this.portManager.savePortConfiguration(this.port, false);
            await setConfig('mcp_transport_type', 'http+sse');
            await setConfig('mcp_server_running', 'true');
        }
        catch (error) {
            console.error('[MCP Server] Failed to start:', error);
            throw error;
        }
    }
    /**
     * Stop the HTTP+SSE MCP server.
     *
     * No-op (resolves undefined) when the server is not running; otherwise
     * resolves once the listener has closed.
     */
    async stop() {
        try {
            if (this.serverInstance && this.isRunning) {
                console.error('[MCP Server] Stopping HTTP+SSE server...');
                return new Promise((resolve) => {
                    this.serverInstance.close(() => {
                        this.isRunning = false;
                        console.error('[MCP Server] HTTP+SSE server stopped (stateless)');
                        resolve();
                    });
                });
            }
        }
        catch (error) {
            console.error('[MCP Server] Error stopping server:', error);
            throw error;
        }
    }
    /**
     * Check if the server is running.
     * @returns {boolean}
     */
    isServerRunning() {
        return this.isRunning;
    }
    /**
     * Get server status and health information.
     * @returns {Promise<object>} status snapshot including endpoints and the
     *   port manager's current report.
     */
    async getStatus() {
        const portReport = await this.portManager.getPortReport();
        const version = await getCurrentVersion();
        return {
            running: this.isRunning,
            port: this.port,
            transport: 'http+sse',
            stateless: true,
            version,
            capabilities: ['streaming', 'real-time', 'multi-ide', 'port-management', 'stateless'],
            endpoints: {
                mcp: `http://localhost:${this.port}/mcp`,
                health: `http://localhost:${this.port}/health`,
                discovery: `http://localhost:${this.port}/.well-known/mcp`
            },
            portManagement: {
                currentPort: this.port,
                defaultPort: portReport.defaultPort,
                availablePorts: portReport.availablePorts,
                configuration: portReport.configuration
            }
        };
    }
    /**
     * Get detailed port management report (delegates to the port manager).
     */
    async getPortReport() {
        return await this.portManager.getPortReport();
    }
    /**
     * Resolve any port conflicts (delegates to the port manager).
     */
    async resolvePortConflicts() {
        return await this.portManager.resolvePortConflict();
    }
    /**
     * Change the server port (requires restart).
     *
     * Validates and persists the new port but does NOT rebind the live
     * listener - the caller must restart the server to apply it.
     *
     * @param {number} newPort - Desired port.
     * @returns {Promise<{success: boolean, message: string}>}
     */
    async changePort(newPort) {
        // Validate the new port
        const validation = await this.portManager.validatePort(newPort);
        if (!validation.valid) {
            return {
                success: false,
                message: validation.error || 'Port validation failed'
            };
        }
        // Save the new port configuration
        await this.portManager.savePortConfiguration(newPort, false);
        return {
            success: true,
            message: `Port changed to ${newPort}. Restart the MCP server to apply the change.`
        };
    }
}
export { HiveAIMCPServer };
//# sourceMappingURL=server.js.map