/**
 * optivise — Optivise, The Ultimate Optimizely Development Assistant with
 * AI-powered features, zero-config setup, and comprehensive development support.
 * (699 lines • 27.8 kB • JavaScript)
 */
/**
* Production Deployment and Scaling Service
* Handles deployment orchestration, auto-scaling, health monitoring, and infrastructure management
*/
import { EventEmitter } from 'events';
import { exec } from 'child_process';
import { promisify } from 'util';
const execAsync = promisify(exec);
export class DeploymentService extends EventEmitter {
config;
logger;
instances = new Map();
activeDeployment;
deploymentHistory = [];
scalingCooldown = new Map();
healthCheckInterval;
autoscalingInterval;
constructor(logger, config) {
super();
this.logger = logger;
this.config = {
environment: 'development',
strategy: 'rolling',
scaling: {
minInstances: 1,
maxInstances: 10,
targetCpuPercent: 70,
targetMemoryPercent: 80,
scaleUpCooldown: 5 * 60 * 1000, // 5 minutes
scaleDownCooldown: 10 * 60 * 1000 // 10 minutes
},
healthCheck: {
path: '/health',
interval: 30 * 1000, // 30 seconds
timeout: 5 * 1000, // 5 seconds
retries: 3,
gracePeriod: 60 * 1000 // 1 minute
},
deployment: {
timeout: 15 * 60 * 1000, // 15 minutes
rollbackOnFailure: true,
preDeployHooks: [],
postDeployHooks: []
},
...config
};
this.startHealthChecks();
this.startAutoscaling();
this.logger.info('Deployment Service initialized', {
environment: this.config.environment,
strategy: this.config.strategy,
scaling: this.config.scaling
});
}
/**
* Deploy new version of the service
*/
async deploy(version, options) {
if (this.activeDeployment && this.activeDeployment.status === 'in-progress') {
throw new Error('Deployment already in progress');
}
const deploymentId = `deploy_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;
const strategy = options?.strategy || this.config.strategy;
const targetInstances = options?.targetInstances || Math.max(this.instances.size, this.config.scaling.minInstances);
const deployment = {
id: deploymentId,
version,
strategy,
status: 'pending',
startTime: Date.now(),
progress: 0,
instances: {
target: targetInstances,
current: this.instances.size,
healthy: this.getHealthyInstanceCount(),
unhealthy: this.getUnhealthyInstanceCount()
},
phases: [
{ name: 'pre-deploy-hooks', status: 'pending' },
{ name: 'deployment', status: 'pending' },
{ name: 'health-checks', status: 'pending' },
{ name: 'post-deploy-hooks', status: 'pending' }
]
};
this.activeDeployment = deployment;
this.logger.info('Starting deployment', { deploymentId, version, strategy, targetInstances });
this.emit('deploymentStarted', deployment);
try {
deployment.status = 'in-progress';
// Phase 1: Pre-deploy hooks
if (!options?.skipHooks && this.config.deployment.preDeployHooks.length > 0) {
await this.executePhase(deployment, 'pre-deploy-hooks', async () => {
await this.executeHooks(this.config.deployment.preDeployHooks, 'pre-deploy');
});
}
else {
this.markPhaseCompleted(deployment, 'pre-deploy-hooks');
}
// Phase 2: Deployment
await this.executePhase(deployment, 'deployment', async () => {
switch (strategy) {
case 'rolling':
await this.performRollingDeployment(deployment, version);
break;
case 'blue-green':
await this.performBlueGreenDeployment(deployment, version);
break;
case 'canary':
await this.performCanaryDeployment(deployment, version);
break;
}
});
// Phase 3: Health checks
await this.executePhase(deployment, 'health-checks', async () => {
await this.waitForHealthyInstances(deployment);
});
// Phase 4: Post-deploy hooks
if (!options?.skipHooks && this.config.deployment.postDeployHooks.length > 0) {
await this.executePhase(deployment, 'post-deploy-hooks', async () => {
await this.executeHooks(this.config.deployment.postDeployHooks, 'post-deploy');
});
}
else {
this.markPhaseCompleted(deployment, 'post-deploy-hooks');
}
deployment.status = 'completed';
deployment.endTime = Date.now();
deployment.progress = 100;
this.logger.info('Deployment completed successfully', { deploymentId, version });
this.emit('deploymentCompleted', deployment);
}
catch (error) {
deployment.status = 'failed';
deployment.endTime = Date.now();
deployment.error = error.message;
this.logger.error('Deployment failed', error, { deploymentId, version });
this.emit('deploymentFailed', deployment);
// Rollback if enabled
if (this.config.deployment.rollbackOnFailure) {
await this.rollback(deploymentId);
}
}
finally {
this.deploymentHistory.push(deployment);
this.activeDeployment = undefined;
// Keep only last 50 deployments
if (this.deploymentHistory.length > 50) {
this.deploymentHistory = this.deploymentHistory.slice(-50);
}
}
return deployment;
}
/**
* Rollback to previous version
*/
async rollback(deploymentId) {
const targetDeployment = deploymentId
? this.deploymentHistory.find(d => d.id === deploymentId)
: this.deploymentHistory.find(d => d.status === 'completed');
if (!targetDeployment) {
this.logger.warn('No deployment found for rollback', { deploymentId });
return false;
}
this.logger.info('Starting rollback', { targetVersion: targetDeployment.version });
try {
// Create rollback deployment
const rollbackDeployment = await this.deploy(`rollback-${targetDeployment.version}`, {
strategy: 'rolling',
skipHooks: true
});
rollbackDeployment.status = 'rolled-back';
this.logger.info('Rollback completed', { targetVersion: targetDeployment.version });
this.emit('rollbackCompleted', { original: targetDeployment, rollback: rollbackDeployment });
return true;
}
catch (error) {
this.logger.error('Rollback failed', error, { targetVersion: targetDeployment.version });
return false;
}
}
/**
* Scale service instances
*/
async scale(targetInstances, reason) {
const currentInstances = this.instances.size;
if (targetInstances === currentInstances) {
return true;
}
// Validate scaling limits
if (targetInstances < this.config.scaling.minInstances || targetInstances > this.config.scaling.maxInstances) {
this.logger.warn('Scaling request outside limits', {
target: targetInstances,
min: this.config.scaling.minInstances,
max: this.config.scaling.maxInstances
});
return false;
}
this.logger.info('Scaling service', { from: currentInstances, to: targetInstances, reason });
try {
if (targetInstances > currentInstances) {
// Scale up
await this.scaleUp(targetInstances - currentInstances);
}
else {
// Scale down
await this.scaleDown(currentInstances - targetInstances);
}
this.emit('scaled', { from: currentInstances, to: targetInstances, reason });
return true;
}
catch (error) {
this.logger.error('Scaling failed', error, { target: targetInstances });
return false;
}
}
/**
* Get current infrastructure status
*/
getInfrastructureStatus() {
const instances = Array.from(this.instances.values());
const healthyInstances = instances.filter(i => i.status === 'healthy');
const unhealthyInstances = instances.filter(i => i.status === 'unhealthy');
const avgCpu = instances.length > 0
? instances.reduce((sum, i) => sum + i.cpu, 0) / instances.length
: 0;
const avgMemory = instances.length > 0
? instances.reduce((sum, i) => sum + i.memory, 0) / instances.length
: 0;
const lastDeployment = this.deploymentHistory.length > 0
? this.deploymentHistory[this.deploymentHistory.length - 1]
: undefined;
return {
environment: this.config.environment,
totalInstances: instances.length,
healthyInstances: healthyInstances.length,
unhealthyInstances: unhealthyInstances.length,
averageCpu: avgCpu,
averageMemory: avgMemory,
totalRequests: 0, // Would be populated from monitoring service
averageResponseTime: 0, // Would be populated from monitoring service
errorRate: 0, // Would be populated from monitoring service
lastDeployment: lastDeployment ? {
version: lastDeployment.version,
timestamp: lastDeployment.startTime,
status: lastDeployment.status
} : undefined
};
}
/**
* Get service instances
*/
getInstances() {
return Array.from(this.instances.values());
}
/**
* Get deployment history
*/
getDeploymentHistory(limit) {
return this.deploymentHistory
.sort((a, b) => b.startTime - a.startTime)
.slice(0, limit);
}
/**
* Get active deployment status
*/
getActiveDeployment() {
return this.activeDeployment;
}
/**
* Register new service instance
*/
registerInstance(instance) {
const instanceId = `instance_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;
const fullInstance = {
id: instanceId,
startTime: Date.now(),
uptime: 0,
lastHealthCheck: 0,
...instance
};
this.instances.set(instanceId, fullInstance);
this.logger.info('Service instance registered', { instanceId, version: instance.version });
this.emit('instanceRegistered', fullInstance);
return instanceId;
}
/**
* Deregister service instance
*/
deregisterInstance(instanceId) {
const instance = this.instances.get(instanceId);
if (instance) {
this.instances.delete(instanceId);
this.logger.info('Service instance deregistered', { instanceId });
this.emit('instanceDeregistered', instance);
return true;
}
return false;
}
/**
* Execute deployment phase
*/
async executePhase(deployment, phaseName, executor) {
const phase = deployment.phases.find(p => p.name === phaseName);
if (!phase)
return;
phase.status = 'running';
phase.startTime = Date.now();
try {
await executor();
phase.status = 'completed';
phase.endTime = Date.now();
}
catch (error) {
phase.status = 'failed';
phase.endTime = Date.now();
phase.message = error.message;
throw error;
}
}
/**
* Mark phase as completed
*/
markPhaseCompleted(deployment, phaseName) {
const phase = deployment.phases.find(p => p.name === phaseName);
if (phase) {
phase.status = 'completed';
phase.startTime = Date.now();
phase.endTime = Date.now();
}
}
/**
* Execute deployment hooks
*/
async executeHooks(hooks, stage) {
for (const hook of hooks) {
this.logger.debug(`Executing ${stage} hook`, { hook });
try {
const { stdout, stderr } = await execAsync(hook, { timeout: 60000 });
if (stderr) {
this.logger.warn(`${stage} hook stderr`, { hook, stderr });
}
this.logger.debug(`${stage} hook completed`, { hook, stdout });
}
catch (error) {
this.logger.error(`${stage} hook failed`, error, { hook });
throw error;
}
}
}
/**
* Perform rolling deployment
*/
async performRollingDeployment(deployment, version) {
const targetInstances = deployment.instances.target;
const batchSize = Math.max(1, Math.floor(targetInstances * 0.25)); // 25% at a time
this.logger.info('Starting rolling deployment', { version, targetInstances, batchSize });
const oldInstances = Array.from(this.instances.values());
// Deploy new instances in batches
for (let i = 0; i < targetInstances; i += batchSize) {
const batchCount = Math.min(batchSize, targetInstances - i);
if (batchCount <= 0)
break;
// Start new instances
for (let j = 0; j < batchCount; j++) {
await this.startNewInstance(version);
}
// Wait for instances to become healthy
await this.waitForHealthyInstances(deployment, 3 * 60 * 1000); // 3 minutes
// Stop old instances if we have them
const instancesToStop = oldInstances.splice(0, Math.min(batchCount, oldInstances.length));
for (const instance of instancesToStop) {
await this.stopInstance(instance.id);
}
deployment.progress = Math.floor((i + batchCount) / targetInstances * 80); // 80% for deployment phase
}
}
/**
* Perform blue-green deployment
*/
async performBlueGreenDeployment(deployment, version) {
this.logger.info('Starting blue-green deployment', { version });
const targetInstances = deployment.instances.target;
const oldInstances = Array.from(this.instances.values());
// Start all new instances (green environment)
const newInstancePromises = [];
for (let i = 0; i < targetInstances; i++) {
newInstancePromises.push(this.startNewInstance(version));
}
await Promise.all(newInstancePromises);
deployment.progress = 40;
// Wait for all instances to be healthy
await this.waitForHealthyInstances(deployment, 5 * 60 * 1000); // 5 minutes
deployment.progress = 60;
// Switch traffic (would update load balancer configuration)
await this.switchTraffic(version);
deployment.progress = 80;
// Stop old instances (blue environment)
for (const instance of oldInstances) {
await this.stopInstance(instance.id);
}
}
/**
* Perform canary deployment
*/
async performCanaryDeployment(deployment, version) {
this.logger.info('Starting canary deployment', { version });
const targetInstances = deployment.instances.target;
const canaryCount = Math.max(1, Math.floor(targetInstances * 0.1)); // 10% canary
// Deploy canary instances
for (let i = 0; i < canaryCount; i++) {
await this.startNewInstance(version);
}
deployment.progress = 20;
// Monitor canary for 5 minutes
await this.monitorCanary(5 * 60 * 1000);
deployment.progress = 40;
// If canary is successful, deploy remaining instances
const remainingCount = targetInstances - canaryCount;
for (let i = 0; i < remainingCount; i++) {
await this.startNewInstance(version);
}
deployment.progress = 80;
}
/**
* Start new service instance
*/
async startNewInstance(version) {
// In a real implementation, this would start a new container or process
const instanceId = this.registerInstance({
version,
status: 'starting',
cpu: 0,
memory: 0,
endpoint: `http://service-${Math.random().toString(36).substr(2, 9)}:3000`,
metadata: { startedBy: 'deployment-service' }
});
// Simulate startup time
setTimeout(() => {
const instance = this.instances.get(instanceId);
if (instance) {
instance.status = 'healthy';
instance.cpu = Math.random() * 50 + 10; // 10-60% CPU
instance.memory = Math.random() * 40 + 20; // 20-60% Memory
}
}, 2000);
return instanceId;
}
/**
* Stop service instance
*/
async stopInstance(instanceId) {
const instance = this.instances.get(instanceId);
if (instance) {
instance.status = 'stopping';
// Simulate graceful shutdown
setTimeout(() => {
this.deregisterInstance(instanceId);
}, 1000);
}
}
/**
* Wait for instances to become healthy
*/
async waitForHealthyInstances(deployment, timeout = 5 * 60 * 1000) {
const startTime = Date.now();
while (Date.now() - startTime < timeout) {
const healthyCount = this.getHealthyInstanceCount();
const totalCount = this.instances.size;
deployment.instances.current = totalCount;
deployment.instances.healthy = healthyCount;
deployment.instances.unhealthy = totalCount - healthyCount;
if (healthyCount >= deployment.instances.target) {
return; // All instances are healthy
}
await new Promise(resolve => setTimeout(resolve, 5000)); // Wait 5 seconds
}
throw new Error('Timeout waiting for instances to become healthy');
}
/**
* Switch traffic to new version (placeholder for load balancer integration)
*/
async switchTraffic(version) {
this.logger.info('Switching traffic to new version', { version });
// In a real implementation, this would update load balancer configuration
await new Promise(resolve => setTimeout(resolve, 1000));
}
/**
* Monitor canary deployment
*/
async monitorCanary(duration) {
this.logger.info('Monitoring canary deployment', { duration });
const startTime = Date.now();
while (Date.now() - startTime < duration) {
// In a real implementation, this would check metrics like error rate, response time, etc.
const canaryInstances = Array.from(this.instances.values()).filter(i => i.status === 'healthy');
if (canaryInstances.length === 0) {
throw new Error('All canary instances are unhealthy');
}
// Simulate monitoring - in reality would check real metrics
const errorRate = Math.random() * 5; // 0-5% error rate
if (errorRate > 2) { // Fail if error rate > 2%
throw new Error(`Canary failed: high error rate ${errorRate.toFixed(2)}%`);
}
await new Promise(resolve => setTimeout(resolve, 10000)); // Check every 10 seconds
}
}
/**
* Scale up instances
*/
async scaleUp(count) {
const version = this.getLatestVersion();
for (let i = 0; i < count; i++) {
await this.startNewInstance(version);
}
// Set cooldown
this.scalingCooldown.set('up', Date.now() + this.config.scaling.scaleUpCooldown);
}
/**
* Scale down instances
*/
async scaleDown(count) {
const instances = Array.from(this.instances.values());
const instancesToStop = instances
.filter(i => i.status === 'healthy')
.sort((a, b) => a.startTime - b.startTime) // Stop oldest first
.slice(0, count);
for (const instance of instancesToStop) {
await this.stopInstance(instance.id);
}
// Set cooldown
this.scalingCooldown.set('down', Date.now() + this.config.scaling.scaleDownCooldown);
}
/**
* Get latest deployed version
*/
getLatestVersion() {
const instances = Array.from(this.instances.values());
return instances.length > 0 && instances[0] ? instances[0].version : 'latest';
}
/**
* Get healthy instance count
*/
getHealthyInstanceCount() {
return Array.from(this.instances.values()).filter(i => i.status === 'healthy').length;
}
/**
* Get unhealthy instance count
*/
getUnhealthyInstanceCount() {
return Array.from(this.instances.values()).filter(i => i.status === 'unhealthy').length;
}
/**
* Make scaling decision based on current metrics
*/
makeScalingDecision() {
const instances = Array.from(this.instances.values()).filter(i => i.status === 'healthy');
const currentInstances = instances.length;
if (currentInstances === 0) {
return {
action: 'scale-up',
currentInstances: 0,
targetInstances: this.config.scaling.minInstances,
reason: 'No healthy instances',
confidence: 1.0,
metrics: { avgCpu: 0, avgMemory: 0, requestRate: 0, responseTime: 0 }
};
}
const avgCpu = instances.reduce((sum, i) => sum + i.cpu, 0) / instances.length;
const avgMemory = instances.reduce((sum, i) => sum + i.memory, 0) / instances.length;
// Check if we're in cooldown
const now = Date.now();
const upCooldown = this.scalingCooldown.get('up') || 0;
const downCooldown = this.scalingCooldown.get('down') || 0;
if (now < upCooldown || now < downCooldown) {
return {
action: 'no-action',
currentInstances,
targetInstances: currentInstances,
reason: 'In cooldown period',
confidence: 1.0,
metrics: { avgCpu, avgMemory, requestRate: 0, responseTime: 0 }
};
}
// Scale up if CPU or memory is high
if (avgCpu > this.config.scaling.targetCpuPercent || avgMemory > this.config.scaling.targetMemoryPercent) {
const targetInstances = Math.min(currentInstances + 1, this.config.scaling.maxInstances);
if (targetInstances > currentInstances) {
return {
action: 'scale-up',
currentInstances,
targetInstances,
reason: `High resource usage: CPU ${avgCpu.toFixed(1)}%, Memory ${avgMemory.toFixed(1)}%`,
confidence: 0.8,
metrics: { avgCpu, avgMemory, requestRate: 0, responseTime: 0 }
};
}
}
// Scale down if resources are underutilized
const lowCpuThreshold = this.config.scaling.targetCpuPercent * 0.3; // 30% of target
const lowMemoryThreshold = this.config.scaling.targetMemoryPercent * 0.3;
if (avgCpu < lowCpuThreshold && avgMemory < lowMemoryThreshold && currentInstances > this.config.scaling.minInstances) {
const targetInstances = Math.max(currentInstances - 1, this.config.scaling.minInstances);
return {
action: 'scale-down',
currentInstances,
targetInstances,
reason: `Low resource usage: CPU ${avgCpu.toFixed(1)}%, Memory ${avgMemory.toFixed(1)}%`,
confidence: 0.6,
metrics: { avgCpu, avgMemory, requestRate: 0, responseTime: 0 }
};
}
return {
action: 'no-action',
currentInstances,
targetInstances: currentInstances,
reason: 'Resources within target range',
confidence: 0.7,
metrics: { avgCpu, avgMemory, requestRate: 0, responseTime: 0 }
};
}
/**
* Start health check monitoring
*/
startHealthChecks() {
this.healthCheckInterval = setInterval(() => {
this.performHealthChecks();
}, this.config.healthCheck.interval);
}
/**
* Perform health checks on all instances
*/
async performHealthChecks() {
const now = Date.now();
for (const instance of this.instances.values()) {
// Skip if instance is starting or stopping
if (instance.status === 'starting' || instance.status === 'stopping') {
continue;
}
// Update uptime
instance.uptime = now - instance.startTime;
// Simulate health check (in reality would make HTTP request)
const isHealthy = Math.random() > 0.05; // 95% success rate
instance.lastHealthCheck = now;
const previousStatus = instance.status;
instance.status = isHealthy ? 'healthy' : 'unhealthy';
if (previousStatus !== instance.status) {
this.logger.info('Instance status changed', {
instanceId: instance.id,
from: previousStatus,
to: instance.status
});
this.emit('instanceStatusChanged', instance);
}
}
}
/**
* Start autoscaling monitoring
*/
startAutoscaling() {
this.autoscalingInterval = setInterval(() => {
void this.performAutoscaling();
}, 60 * 1000); // Check every minute
}
/**
* Perform autoscaling based on current metrics
*/
async performAutoscaling() {
const decision = this.makeScalingDecision();
if (decision.action !== 'no-action' && decision.confidence > 0.7) {
this.logger.info('Autoscaling decision', decision);
try {
await this.scale(decision.targetInstances, `Autoscaling: ${decision.reason}`);
}
catch (error) {
this.logger.error('Autoscaling failed', error, decision);
}
}
}
/**
* Cleanup resources
*/
destroy() {
if (this.healthCheckInterval) {
clearInterval(this.healthCheckInterval);
}
if (this.autoscalingInterval) {
clearInterval(this.autoscalingInterval);
}
this.instances.clear();
this.deploymentHistory = [];
this.scalingCooldown.clear();
this.activeDeployment = undefined;
this.removeAllListeners();
this.logger.info('Deployment Service destroyed');
}
}
// Global deployment service instance
export const deploymentService = (logger, config) => new DeploymentService(logger, config);
//# sourceMappingURL=deployment-service.js.map