UNPKG

auto-publishing-mcp-server

Version:

Enterprise-grade MCP Server for Auto-Publishing with pre-publish validation, multi-cloud deployment, and monitoring

586 lines (510 loc) 17.5 kB
/**
 * Auto-Scaling System
 * Automatic horizontal and vertical scaling based on metrics.
 *
 * Monitors registered Docker containers (via dockerode) or Kubernetes
 * deployments (via kubectl) and scales them up/down according to CPU,
 * memory, and response-time thresholds.
 */
import Docker from 'dockerode';
import { execSync } from 'child_process';
import fs from 'fs/promises';
import path from 'path';

export class AutoScaler {
  /**
   * @param {object} [config]
   * @param {number} [config.monitoringInterval] - ms between scaling checks (default 30000).
   * @param {string} [config.configPath] - path to persisted scaling config JSON.
   */
  constructor(config = {}) {
    this.docker = new Docker();
    this.scalingRules = new Map();   // containerId -> scaling rules
    this.scalingHistory = new Map(); // containerId -> history of scaling events
    this.isRunning = false;
    this.monitoringInterval = config.monitoringInterval || 30000; // 30 seconds
    this.configPath = config.configPath || '/root/projects/auto-publishing/config/scaling-config.json';

    // Default scaling thresholds; per-container overrides are merged in registerContainer().
    this.defaultConfig = {
      cpu: {
        scaleUp: 70,      // Scale up if CPU > 70%
        scaleDown: 30,    // Scale down if CPU < 30%
        cooldown: 300000  // 5 minutes cooldown
      },
      memory: {
        scaleUp: 80,      // Scale up if Memory > 80%
        scaleDown: 40,    // Scale down if Memory < 40%
        cooldown: 300000
      },
      responseTime: {
        scaleUp: 1000,    // Scale up if response time > 1000ms
        scaleDown: 200,   // Scale down if response time < 200ms
        cooldown: 300000
      },
      replicas: {
        min: 1,
        max: 10,
        increment: 1
      }
    };
  }

  /**
   * Start auto-scaling monitoring.
   *
   * @param {object} [args]
   * @param {Array<object>} [args.containers] - container configs to register (see registerContainer).
   * @returns {Promise<object>} status payload.
   * @throws {Error} if configuration loading or container registration fails.
   */
  async startAutoScaling(args = {}) {
    const { containers = [] } = args;

    if (this.isRunning) {
      return {
        output: 'Auto-scaling is already running',
        data: { status: 'already_running' }
      };
    }

    try {
      // Load or create scaling configuration
      await this.loadScalingConfig();

      // Register containers for scaling
      for (const container of containers) {
        await this.registerContainer(container);
      }

      // Start monitoring loop. Deliberately not awaited (fire-and-forget),
      // but attach a rejection handler so a loop failure is never silent.
      this.isRunning = true;
      this.monitoringLoop().catch((err) => {
        console.error('Monitoring loop terminated unexpectedly:', err);
        this.isRunning = false;
      });

      console.log('🔄 Auto-scaling system started');

      return {
        output: 'Auto-scaling system started successfully',
        data: {
          status: 'started',
          containers: containers.length,
          monitoringInterval: this.monitoringInterval,
          timestamp: new Date().toISOString()
        }
      };
    } catch (error) {
      throw new Error(`Failed to start auto-scaling: ${error.message}`);
    }
  }

  /**
   * Stop auto-scaling monitoring. The loop exits at its next wake-up.
   *
   * @returns {Promise<object>} status payload.
   */
  async stopAutoScaling() {
    if (!this.isRunning) {
      return {
        output: 'Auto-scaling is not running',
        data: { status: 'not_running' }
      };
    }

    this.isRunning = false;
    console.log('🛑 Auto-scaling system stopped');

    return {
      output: 'Auto-scaling system stopped',
      data: { status: 'stopped', timestamp: new Date().toISOString() }
    };
  }

  /**
   * Register a container (or Kubernetes deployment) for auto-scaling.
   *
   * Threshold overrides are merged PER SECTION so a partial override like
   * `{ cpu: { scaleUp: 90 } }` keeps the default scaleDown/cooldown values
   * instead of silently dropping them.
   *
   * @param {object} containerConfig
   * @param {string} containerConfig.containerId - Docker container id or K8s deployment name.
   * @param {string} [containerConfig.scalingType='horizontal'] - 'horizontal' or 'vertical'.
   * @param {object} [containerConfig.thresholds] - partial overrides of defaultConfig sections.
   * @param {boolean} [containerConfig.kubernetes=false] - true to scale via kubectl.
   * @param {string} [containerConfig.namespace='default'] - K8s namespace.
   * @returns {Promise<object>} registration payload.
   */
  async registerContainer(containerConfig) {
    const {
      containerId,
      scalingType = 'horizontal', // horizontal or vertical
      thresholds = {},
      kubernetes = false,
      namespace = 'default'
    } = containerConfig;

    const config = {
      cpu: { ...this.defaultConfig.cpu, ...thresholds.cpu },
      memory: { ...this.defaultConfig.memory, ...thresholds.memory },
      responseTime: { ...this.defaultConfig.responseTime, ...thresholds.responseTime },
      replicas: { ...this.defaultConfig.replicas, ...thresholds.replicas },
      scalingType,
      kubernetes,
      namespace,
      lastScaled: 0,
      currentReplicas: kubernetes
        ? await this.getKubernetesReplicas(containerId, namespace)
        : 1
    };

    this.scalingRules.set(containerId, config);
    this.scalingHistory.set(containerId, []);

    console.log(`📋 Registered ${containerId} for ${scalingType} scaling`);

    return {
      output: `Container ${containerId} registered for auto-scaling`,
      data: { containerId, scalingType, config }
    };
  }

  /**
   * Get scaling status for one container, or for all registered containers.
   *
   * @param {string|null} [containerId] - specific container, or null for all.
   * @returns {Promise<object>} status payload.
   * @throws {Error} if a specific containerId is not registered.
   */
  async getScalingStatus(containerId = null) {
    if (containerId) {
      const config = this.scalingRules.get(containerId);
      const history = this.scalingHistory.get(containerId) || [];

      if (!config) {
        throw new Error(`Container ${containerId} not registered for scaling`);
      }

      return {
        output: `Scaling status for ${containerId}`,
        data: {
          containerId,
          config,
          history: history.slice(-10), // Last 10 scaling events
          lastScaled: new Date(config.lastScaled).toISOString()
        }
      };
    }

    const allContainers = Array.from(this.scalingRules.keys()).map((id) => ({
      containerId: id,
      config: this.scalingRules.get(id),
      recentEvents: this.scalingHistory.get(id)?.slice(-3) || []
    }));

    return {
      output: `Auto-scaling status for ${allContainers.length} containers`,
      data: {
        isRunning: this.isRunning,
        containers: allContainers,
        monitoringInterval: this.monitoringInterval
      }
    };
  }

  /**
   * Monitoring loop: performs a scaling check, then sleeps for
   * monitoringInterval; repeats until isRunning is cleared.
   */
  async monitoringLoop() {
    while (this.isRunning) {
      try {
        await this.performScalingCheck();
      } catch (error) {
        console.error('Error in scaling monitoring loop:', error);
      }
      await new Promise((resolve) => setTimeout(resolve, this.monitoringInterval));
    }
  }

  /**
   * Perform one scaling check across all registered containers.
   * A failure for one container is logged and does not abort the others.
   */
  async performScalingCheck() {
    for (const [containerId, config] of this.scalingRules) {
      try {
        const metrics = await this.collectContainerMetrics(containerId, config);
        const scalingDecision = this.analyzeMetrics(metrics, config);

        if (scalingDecision.shouldScale) {
          await this.executeScaling(containerId, scalingDecision, config);
        }
      } catch (error) {
        console.error(`Error checking scaling for ${containerId}:`, error);
      }
    }
  }

  /**
   * Collect CPU/memory/response-time metrics for a container.
   * On any collection error the partially-filled (zeroed) metrics object
   * is returned so the caller treats it as "no pressure".
   *
   * @param {string} containerId
   * @param {object} config - the container's scaling rules.
   * @returns {Promise<{cpu:number, memory:number, responseTime:number, requestRate:number, timestamp:number, replicas?:number}>}
   */
  async collectContainerMetrics(containerId, config) {
    const metrics = {
      cpu: 0,
      memory: 0,
      responseTime: 0,
      requestRate: 0,
      timestamp: Date.now()
    };

    try {
      if (config.kubernetes) {
        // Get Kubernetes pod metrics
        const podMetrics = await this.getKubernetesPodMetrics(containerId, config.namespace);
        metrics.cpu = podMetrics.cpu;
        metrics.memory = podMetrics.memory;
        metrics.replicas = podMetrics.replicas;
      } else {
        // Get Docker container stats (single snapshot, not a stream)
        const container = this.docker.getContainer(containerId);
        const stats = await container.stats({ stream: false });

        // Calculate CPU percentage. Guard against a zero system delta,
        // which yields NaN/Infinity on the first sample or a stalled clock.
        const cpuDelta =
          stats.cpu_stats.cpu_usage.total_usage - stats.precpu_stats.cpu_usage.total_usage;
        const systemDelta =
          stats.cpu_stats.system_cpu_usage - stats.precpu_stats.system_cpu_usage;
        const numCpus = stats.cpu_stats.online_cpus || 1;
        metrics.cpu = systemDelta > 0 ? (cpuDelta / systemDelta) * numCpus * 100.0 : 0;

        // Calculate memory percentage; guard against a missing/zero limit.
        const memoryUsage = stats.memory_stats.usage;
        const memoryLimit = stats.memory_stats.limit;
        metrics.memory = memoryLimit > 0 ? (memoryUsage / memoryLimit) * 100 : 0;
      }

      // Get response time from health endpoint
      metrics.responseTime = await this.measureResponseTime(containerId, config);
    } catch (error) {
      console.error(`Error collecting metrics for ${containerId}:`, error);
    }

    return metrics;
  }

  /**
   * Analyze metrics against thresholds and decide whether to scale.
   * Priority order (later checks override earlier): CPU, memory, response time.
   *
   * NOTE: the cooldown check uses config.cpu.cooldown as the single gate for
   * all metrics (per-metric cooldowns are configured but not applied
   * independently) — preserved from the original design.
   *
   * @param {object} metrics - output of collectContainerMetrics.
   * @param {object} config - the container's scaling rules.
   * @returns {{shouldScale:boolean, action:?string, reason:?string, targetReplicas:number}}
   */
  analyzeMetrics(metrics, config) {
    const now = Date.now();
    const timeSinceLastScale = now - config.lastScaled;

    // Check cooldown period
    if (timeSinceLastScale < config.cpu.cooldown) {
      return { shouldScale: false, reason: 'cooldown_period' };
    }

    const decision = {
      shouldScale: false,
      action: null, // 'scale_up' or 'scale_down'
      reason: null,
      targetReplicas: config.currentReplicas
    };

    // Check CPU scaling conditions
    if (metrics.cpu > config.cpu.scaleUp) {
      decision.shouldScale = true;
      decision.action = 'scale_up';
      decision.reason = `CPU usage ${metrics.cpu.toFixed(1)}% > ${config.cpu.scaleUp}%`;
      decision.targetReplicas = Math.min(
        config.currentReplicas + config.replicas.increment,
        config.replicas.max
      );
    } else if (metrics.cpu < config.cpu.scaleDown && config.currentReplicas > config.replicas.min) {
      decision.shouldScale = true;
      decision.action = 'scale_down';
      decision.reason = `CPU usage ${metrics.cpu.toFixed(1)}% < ${config.cpu.scaleDown}%`;
      decision.targetReplicas = Math.max(
        config.currentReplicas - config.replicas.increment,
        config.replicas.min
      );
    }

    // Check memory scaling conditions (override CPU if more critical)
    if (metrics.memory > config.memory.scaleUp) {
      decision.shouldScale = true;
      decision.action = 'scale_up';
      decision.reason = `Memory usage ${metrics.memory.toFixed(1)}% > ${config.memory.scaleUp}%`;
      decision.targetReplicas = Math.min(
        config.currentReplicas + config.replicas.increment,
        config.replicas.max
      );
    }

    // Check response time scaling conditions
    if (metrics.responseTime > config.responseTime.scaleUp) {
      decision.shouldScale = true;
      decision.action = 'scale_up';
      decision.reason = `Response time ${metrics.responseTime}ms > ${config.responseTime.scaleUp}ms`;
      decision.targetReplicas = Math.min(
        config.currentReplicas + config.replicas.increment,
        config.replicas.max
      );
    }

    return decision;
  }

  /**
   * Execute a scaling action and record it in the container's history.
   * Failures are recorded on the event (success=false) but not rethrown,
   * so one bad scale never kills the monitoring loop.
   *
   * @param {string} containerId
   * @param {object} decision - output of analyzeMetrics with shouldScale=true.
   * @param {object} config - the container's scaling rules (mutated on success).
   */
  async executeScaling(containerId, decision, config) {
    const scalingEvent = {
      timestamp: new Date().toISOString(),
      action: decision.action,
      reason: decision.reason,
      fromReplicas: config.currentReplicas,
      toReplicas: decision.targetReplicas,
      success: false
    };

    try {
      if (config.kubernetes) {
        // Scale Kubernetes deployment
        await this.scaleKubernetesDeployment(containerId, decision.targetReplicas, config.namespace);
      } else {
        // Scale Docker containers (create/remove containers)
        await this.scaleDockerContainers(containerId, decision.targetReplicas, config);
      }

      // Update configuration
      config.currentReplicas = decision.targetReplicas;
      config.lastScaled = Date.now();
      scalingEvent.success = true;

      // Fixed: the from/to separator was missing in the original template.
      console.log(
        `📈 Scaled ${containerId}: ${decision.reason} (${scalingEvent.fromReplicas} -> ${scalingEvent.toReplicas})`
      );
    } catch (error) {
      scalingEvent.error = error.message;
      console.error(`❌ Failed to scale ${containerId}:`, error);
    }

    // Record scaling event, keeping only the last 50
    const history = this.scalingHistory.get(containerId) || [];
    history.push(scalingEvent);
    if (history.length > 50) {
      history.splice(0, history.length - 50);
    }
    this.scalingHistory.set(containerId, history);
  }

  /**
   * Scale a Kubernetes deployment via kubectl.
   *
   * @param {string} deploymentName
   * @param {number} replicas
   * @param {string} namespace
   */
  async scaleKubernetesDeployment(deploymentName, replicas, namespace) {
    execSync(`kubectl scale deployment ${deploymentName} --replicas=${replicas} -n ${namespace}`);
  }

  /**
   * Scale Docker containers horizontally by cloning (scale up) or removing
   * (scale down) replica containers. Replicas are tagged with
   * `autoscaling.parent` labels so they can be found again on scale-down.
   *
   * @param {string} containerId - the parent container.
   * @param {number} targetReplicas
   * @param {object} config - the container's scaling rules.
   */
  async scaleDockerContainers(containerId, targetReplicas, config) {
    const currentReplicas = config.currentReplicas;

    if (targetReplicas > currentReplicas) {
      // Scale up - create additional containers cloned from the parent
      const container = this.docker.getContainer(containerId);
      const containerInfo = await container.inspect();

      for (let i = currentReplicas; i < targetReplicas; i++) {
        const newContainerName = `${containerId}-replica-${i + 1}`;

        await this.docker.createContainer({
          name: newContainerName,
          Image: containerInfo.Config.Image,
          Env: containerInfo.Config.Env,
          HostConfig: {
            ...containerInfo.HostConfig,
            PortBindings: {} // Don't bind ports for replicas
          },
          Labels: {
            ...containerInfo.Config.Labels,
            'autoscaling.replica': 'true',
            'autoscaling.parent': containerId
          }
        });

        const newContainer = this.docker.getContainer(newContainerName);
        await newContainer.start();
      }
    } else if (targetReplicas < currentReplicas) {
      // Scale down - remove replica containers (never the parent)
      const containers = await this.docker.listContainers({
        all: true,
        filters: { label: [`autoscaling.parent=${containerId}`] }
      });

      const replicasToRemove = currentReplicas - targetReplicas;
      const containersToRemove = containers.slice(0, replicasToRemove);

      for (const containerInfo of containersToRemove) {
        const container = this.docker.getContainer(containerInfo.Id);
        await container.stop();
        await container.remove();
      }
    }
  }

  /**
   * Get averaged CPU/memory metrics for a deployment's pods via `kubectl top`.
   * Returns zeroed metrics on error or when no pods are found (avoids NaN
   * from dividing by zero pod count).
   *
   * @param {string} deploymentName
   * @param {string} namespace
   * @returns {Promise<{cpu:number, memory:number, replicas:number}>}
   */
  async getKubernetesPodMetrics(deploymentName, namespace) {
    try {
      const output = execSync(
        `kubectl top pods -l app=${deploymentName} -n ${namespace} --no-headers`,
        { encoding: 'utf8' }
      );

      const lines = output.trim().split('\n').filter((line) => line);
      if (lines.length === 0) {
        return { cpu: 0, memory: 0, replicas: 1 };
      }

      let totalCpu = 0;
      let totalMemory = 0;

      for (const line of lines) {
        const parts = line.split(/\s+/);
        // Convert millicores to percentage; Mi to percentage of 1 GiB.
        const cpu = parseInt(parts[1].replace('m', ''), 10) / 10;
        const memory = (parseInt(parts[2].replace('Mi', ''), 10) / 1024) * 100;
        totalCpu += cpu;
        totalMemory += memory;
      }

      return {
        cpu: totalCpu / lines.length,      // Average CPU
        memory: totalMemory / lines.length, // Average memory
        replicas: lines.length
      };
    } catch (error) {
      console.error('Error getting Kubernetes metrics:', error);
      return { cpu: 0, memory: 0, replicas: 1 };
    }
  }

  /**
   * Get the current replica count of a Kubernetes deployment.
   * Falls back to 1 on any error.
   *
   * @param {string} deploymentName
   * @param {string} namespace
   * @returns {Promise<number>}
   */
  async getKubernetesReplicas(deploymentName, namespace) {
    try {
      const output = execSync(
        `kubectl get deployment ${deploymentName} -n ${namespace} -o jsonpath='{.status.replicas}'`,
        { encoding: 'utf8' }
      );
      return parseInt(output, 10) || 1;
    } catch (error) {
      return 1;
    }
  }

  /**
   * Measure the /health endpoint response time via curl (5s timeout).
   * Returns 5000ms on any failure so errors look like high latency and
   * bias the scaler toward scaling up rather than masking problems.
   *
   * NOTE(review): containerId/ip are interpolated into a shell command;
   * values come from Docker/K8s, but keep them trusted.
   *
   * @param {string} containerId
   * @param {object} config
   * @returns {Promise<number>} response time in ms.
   */
  async measureResponseTime(containerId, config) {
    try {
      const start = Date.now();

      if (config.kubernetes) {
        // Use service endpoint for Kubernetes
        execSync(`curl -s -m 5 http://${containerId}.${config.namespace}.svc.cluster.local/health`);
      } else {
        // Use container IP for Docker
        const container = this.docker.getContainer(containerId);
        const containerInfo = await container.inspect();
        const ip = containerInfo.NetworkSettings.IPAddress;
        execSync(`curl -s -m 5 http://${ip}:3000/health`);
      }

      return Date.now() - start;
    } catch (error) {
      return 5000; // Return high response time on error
    }
  }

  /**
   * Load scaling configuration from disk, merging over the defaults.
   * Missing/unreadable config is non-fatal: defaults are kept.
   */
  async loadScalingConfig() {
    try {
      const configData = await fs.readFile(this.configPath, 'utf8');
      const config = JSON.parse(configData);
      // Merge with defaults
      Object.assign(this.defaultConfig, config);
    } catch (error) {
      // Use defaults if config file doesn't exist
      console.log('Using default scaling configuration');
    }
  }

  /**
   * Merge and persist scaling configuration to disk.
   *
   * @param {object} config - partial configuration to merge into defaults.
   * @returns {Promise<object>} status payload.
   */
  async saveScalingConfig(config) {
    Object.assign(this.defaultConfig, config);
    await fs.mkdir(path.dirname(this.configPath), { recursive: true });
    await fs.writeFile(this.configPath, JSON.stringify(this.defaultConfig, null, 2));

    return {
      output: 'Scaling configuration saved',
      data: { configPath: this.configPath, config: this.defaultConfig }
    };
  }

  /**
   * Manually scale a registered container to an explicit replica count,
   * bypassing threshold analysis (but recorded in history like any event).
   *
   * @param {object} args
   * @param {string} args.containerId
   * @param {number} args.replicas - desired replica count (non-negative integer).
   * @param {string} [args.reason='manual']
   * @returns {Promise<object>} the recorded scaling event.
   * @throws {Error} if the container is unregistered, replicas is invalid,
   *                 or the underlying scale operation fails.
   */
  async manualScale(args) {
    const { containerId, replicas, reason = 'manual' } = args;

    const config = this.scalingRules.get(containerId);
    if (!config) {
      throw new Error(`Container ${containerId} not registered for scaling`);
    }

    // Validate the requested count before touching any infrastructure.
    if (!Number.isInteger(replicas) || replicas < 0) {
      throw new Error(`Manual scaling failed: invalid replica count ${replicas}`);
    }

    const scalingEvent = {
      timestamp: new Date().toISOString(),
      action: replicas > config.currentReplicas ? 'scale_up' : 'scale_down',
      reason: `Manual scaling: ${reason}`,
      fromReplicas: config.currentReplicas,
      toReplicas: replicas,
      success: false
    };

    try {
      if (config.kubernetes) {
        await this.scaleKubernetesDeployment(containerId, replicas, config.namespace);
      } else {
        await this.scaleDockerContainers(containerId, replicas, config);
      }

      config.currentReplicas = replicas;
      config.lastScaled = Date.now();
      scalingEvent.success = true;

      // Record event
      const history = this.scalingHistory.get(containerId) || [];
      history.push(scalingEvent);
      this.scalingHistory.set(containerId, history);

      return {
        output: `Manually scaled ${containerId} to ${replicas} replicas`,
        data: scalingEvent
      };
    } catch (error) {
      scalingEvent.error = error.message;
      throw new Error(`Manual scaling failed: ${error.message}`);
    }
  }
}

export default AutoScaler;