mcp-adr-analysis-server

MCP server for analyzing Architectural Decision Records and project architecture

1,146 lines (1,138 loc) 77.2 kB
/**
 * Deployment Readiness Tool - Version 2.0
 *
 * Comprehensive deployment validation with test failure tracking, deployment history analysis,
 * and hard blocking integration with smart git push.
 * Enhanced with memory integration for deployment assessment tracking and pattern recognition.
 *
 * IMPORTANT FOR AI ASSISTANTS: This tool provides:
 * 1. Test Execution Validation: Zero tolerance for test failures
 * 2. Deployment History Analysis: Pattern detection and success rate tracking
 * 3. Code Quality Gates: Mock vs production code detection
 * 4. Hard Blocking: Prevents unsafe deployments via smart git push integration
 * 5. Memory Integration: Stores deployment assessments as memory entities
 *
 * Memory Dependencies:
 * - CREATES: deployment_assessment memory entities
 * - MIGRATES: Existing deployment history to memory entities
 * - ANALYZES: Deployment patterns across memory entities
 * - INTEGRATES: smart-git-push-tool for deployment blocking
 * - INTEGRATES: todo-file-watcher for automatic task creation
 */
import { z } from 'zod';
import { McpAdrError } from '../types/index.js';
import { execSync } from 'node:child_process';
import { readFileSync, writeFileSync, existsSync, mkdirSync } from 'fs';
import { join, basename } from 'path';
import * as os from 'os';
import { validateMcpResponse } from '../utils/mcp-response-validator.js';
import { jsonSafeError } from '../utils/json-safe.js';
import { MemoryEntityManager } from '../utils/memory-entity-manager.js';
import { EnhancedLogger } from '../utils/enhanced-logging.js';
import { TreeSitterAnalyzer } from '../utils/tree-sitter-analyzer.js';
import { findFiles, findRelatedCode } from '../utils/file-system.js';
import { ResearchOrchestrator } from '../utils/research-orchestrator.js';
import { MermaidDiagrams } from '../utils/mermaid-diagrams.js';
// Core schemas
const DeploymentReadinessSchema = z.object({
  operation: z
    .enum([
      'check_readiness', // Full deployment readiness check
      'validate_production', // Production-specific validation
      'test_validation', // Test execution and failure analysis
      'deployment_history', // Deployment history analysis
      'full_audit', // Comprehensive audit (all checks)
      'emergency_override', // Emergency bypass with justification
    ])
    .describe('Operation to perform'),
  // Core Configuration
  projectPath: z.string().optional().describe('Project root path'),
  targetEnvironment: z
    .enum(['staging', 'production', 'integration'])
    .default('production')
    .describe('Target deployment environment'),
  strictMode: z.boolean().default(true).describe('Enable strict validation (recommended)'),
  // Code Quality Gates
  allowMockCode: z
    .boolean()
    .default(false)
    .describe('Allow mock code in deployment (NOT RECOMMENDED)'),
  productionCodeThreshold: z
    .number()
    .default(85)
    .describe('Minimum production code quality score (0-100)'),
  mockCodeMaxAllowed: z.number().default(0).describe('Maximum mock code indicators allowed'),
  // Test Failure Gates
  maxTestFailures: z
    .number()
    .default(0)
    .describe('Maximum test failures allowed (0 = zero tolerance)'),
  requireTestCoverage: z.number().default(80).describe('Minimum test coverage percentage required'),
  blockOnFailingTests: z.boolean().default(true).describe('Block deployment if tests are failing'),
  testSuiteRequired: z
    .array(z.string())
    .default([])
    .describe('Required test suites that must pass'),
  // Deployment History Gates
  maxRecentFailures: z.number().default(2).describe('Maximum recent deployment failures allowed'),
  deploymentSuccessThreshold: z
    .number()
    .default(80)
    .describe('Minimum deployment success rate required (%)'),
  blockOnRecentFailures: z.boolean().default(true).describe('Block if recent deployments failed'),
  rollbackFrequencyThreshold: z
    .number()
    .default(20)
    .describe('Maximum rollback frequency allowed (%)'),
  // Integration Rules
  requireAdrCompliance: z.boolean().default(true).describe('Require ADR compliance validation'),
  integrateTodoTasks: z.boolean().default(true).describe('Auto-create blocking tasks for issues'),
  updateHealthScoring: z.boolean().default(true).describe('Update project health scores'),
  triggerSmartGitPush: z.boolean().default(false).describe('Trigger smart git push validation'),
  // Human Override System
  emergencyBypass: z.boolean().default(false).describe('Emergency bypass for critical fixes'),
  businessJustification: z.string().optional().describe('Business justification for overrides'),
  approvalRequired: z.boolean().default(true).describe('Require approval for overrides'),
  // Memory Integration
  enableMemoryIntegration: z.boolean().default(true).describe('Enable memory entity storage'),
  migrateExistingHistory: z
    .boolean()
    .default(false)
    .describe('Migrate existing deployment history to memory'),
  // Tree-sitter Analysis
  enableTreeSitterAnalysis: z
    .boolean()
    .default(true)
    .describe('Use tree-sitter for enhanced code analysis'),
  treeSitterLanguages: z
    .array(z.string())
    .default(['typescript', 'javascript', 'python', 'yaml', 'hcl'])
    .describe('Languages to analyze with tree-sitter'),
  // Research-Driven Integration
  enableResearchIntegration: z
    .boolean()
    .default(true)
    .describe('Use research-orchestrator to verify environment readiness'),
  researchConfidenceThreshold: z
    .number()
    .default(0.7)
    .describe('Minimum confidence for environment research (0-1)'),
});
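/*
 * Usage sketch (illustrative): a minimal arguments object that satisfies
 * DeploymentReadinessSchema above, passed to the exported deploymentReadiness()
 * function defined later in this file. Field names come from the schema; the
 * concrete values and the project path are hypothetical.
 *
 * @example
 * const exampleArgs = {
 *   operation: 'check_readiness',
 *   projectPath: '/path/to/project', // hypothetical path
 *   targetEnvironment: 'production',
 *   strictMode: true,
 *   maxTestFailures: 0,
 *   requireTestCoverage: 80,
 * };
 * const response = await deploymentReadiness(exampleArgs);
 * console.log(response.content?.[0]?.text); // generated readiness report
 */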
/**
 * Deployment Memory Manager for tracking deployment assessments and patterns
 * Supports dependency injection for improved testability
 * @see Issue #310 - Dependency injection for improved testability
 */
class DeploymentMemoryManager {
  memoryManager;
  logger;
  /**
   * Constructor with optional dependency injection
   * @param deps - Optional dependencies for testing (defaults create real instances)
   */
  constructor(deps = {}) {
    this.memoryManager = deps.memoryManager ?? new MemoryEntityManager();
    this.logger = deps.logger ?? new EnhancedLogger();
  }
  async initialize() {
    await this.memoryManager.initialize();
  }
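  /*
   * Testing sketch (illustrative): the dependency-injection seam described above
   * (see Issue #310) allows the real MemoryEntityManager and EnhancedLogger to be
   * replaced with test doubles. The stubs below are hypothetical and only cover
   * the methods this class actually calls (initialize, upsertEntity,
   * queryEntities, info, error).
   *
   * @example
   * const manager = new DeploymentMemoryManager({
   *   memoryManager: {
   *     initialize: async () => {},
   *     upsertEntity: async entity => ({ ...entity, id: 'test-entity-id' }),
   *     queryEntities: async () => ({ entities: [] }),
   *   },
   *   logger: { info: () => {}, error: () => {} },
   * });
   * await manager.initialize();
   */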
  /**
   * Store deployment assessment as memory entity
   */
  async storeDeploymentAssessment(environment, readinessData, validationResults, projectPath) {
    try {
      const assessmentData = {
        environment: environment,
        readinessScore: readinessData.overallScore / 100, // Convert to 0-1 range
        validationResults: {
          testResults: {
            passed: readinessData.testValidationResult.testSuitesExecuted.reduce(
              (sum, suite) => sum + suite.passedTests,
              0
            ),
            failed: readinessData.testValidationResult.failureCount,
            coverage: readinessData.testValidationResult.coveragePercentage / 100, // Convert to 0-1 range
            criticalFailures: readinessData.testValidationResult.criticalTestFailures.map(
              f => f.testName
            ),
          },
          securityValidation: {
            vulnerabilities: 0, // Default - could be enhanced with actual security scan data
            securityScore: 0.8, // Default - could be enhanced with actual security analysis
            criticalIssues: readinessData.criticalBlockers
              .filter(b => b.category === 'adr_compliance')
              .map(b => b.title),
          },
          performanceValidation: {
            performanceScore: Math.max(0, (readinessData.overallScore - 20) / 80), // Derived from overall score
            bottlenecks: [],
            resourceUtilization: {},
          },
        },
        blockingIssues: [
          ...readinessData.criticalBlockers.map(b => ({
            issue: `${b.title}: ${b.description}`,
            severity: b.severity,
            category: this.mapBlockerCategory(b.category),
            resolution: b.resolutionSteps.join('; '),
            estimatedEffort: b.estimatedResolutionTime,
          })),
          ...readinessData.testFailureBlockers.map(b => ({
            issue: `${b.title}: ${b.description}`,
            severity: b.severity,
            category: 'test',
            resolution: b.resolutionSteps.join('; '),
            estimatedEffort: b.estimatedResolutionTime,
          })),
          ...readinessData.deploymentHistoryBlockers.map(b => ({
            issue: `${b.title}: ${b.description}`,
            severity: b.severity,
            category: 'configuration',
            resolution: b.resolutionSteps.join('; '),
            estimatedEffort: b.estimatedResolutionTime,
          })),
        ],
        deploymentStrategy: {
          type: 'rolling', // Default strategy - could be made configurable
          rollbackPlan: 'Automated rollback via deployment pipeline with health check validation',
          monitoringPlan:
            'Monitor application metrics, error rates, and performance indicators for 30 minutes post-deployment',
          estimatedDowntime: readinessData.isDeploymentReady
            ? '0 minutes (rolling deployment)'
            : 'Cannot deploy - blockers present',
        },
        complianceChecks: {
          adrCompliance: readinessData.adrComplianceResult.score / 100, // Convert to 0-1 range
          regulatoryCompliance: [], // Could be enhanced with actual compliance data
          auditTrail: [
            `Deployment assessment completed at ${new Date().toISOString()}`,
            `Test validation: ${readinessData.testValidationResult.overallTestStatus}`,
            `Overall readiness score: ${readinessData.overallScore}%`,
            `Git push status: ${readinessData.gitPushStatus}`,
          ],
        },
      };
      const entity = await this.memoryManager.upsertEntity({
        type: 'deployment_assessment',
        title: `Deployment Assessment: ${environment} - ${readinessData.isDeploymentReady ? 'READY' : 'BLOCKED'} - ${new Date().toISOString().split('T')[0]}`,
        description: `Deployment readiness assessment for ${environment} environment${readinessData.isDeploymentReady ? ' - APPROVED' : ' - BLOCKED'}`,
        tags: [
          'deployment',
          environment.toLowerCase(),
          'readiness-assessment',
          readinessData.isDeploymentReady ? 'approved' : 'blocked',
          `score-${Math.floor(readinessData.overallScore / 10) * 10}`,
          ...(readinessData.criticalBlockers.length > 0 ? ['critical-issues'] : []),
          ...(readinessData.testValidationResult.failureCount > 0 ? ['test-failures'] : []),
          ...(readinessData.deploymentHistoryAnalysis.rollbackRate > 20 ? ['high-rollback-risk'] : []),
        ],
        assessmentData,
        relationships: [],
        context: {
          projectPhase: 'deployment-validation',
          technicalStack: this.extractTechnicalStack(validationResults),
          environmentalFactors: [environment, projectPath || 'unknown-project'].filter(Boolean),
          stakeholders: ['deployment-team', 'qa-team', 'infrastructure-team'],
        },
        accessPattern: {
          lastAccessed: new Date().toISOString(),
          accessCount: 1,
          accessContext: ['deployment-assessment'],
        },
        evolution: {
          origin: 'created',
          transformations: [
            {
              timestamp: new Date().toISOString(),
              type: 'assessment_creation',
              description: `Deployment assessment created for ${environment}`,
              agent: 'deployment-readiness-tool',
            },
          ],
        },
        validation: {
          isVerified: readinessData.isDeploymentReady,
          verificationMethod: 'comprehensive-deployment-audit',
          verificationTimestamp: new Date().toISOString(),
        },
      });
      this.logger.info(`Deployment assessment stored for ${environment}`, 'DeploymentMemoryManager', {
        environment,
        entityId: entity.id,
        readinessScore: readinessData.overallScore,
        isReady: readinessData.isDeploymentReady,
        blockingIssues: assessmentData.blockingIssues.length,
      });
      return entity.id;
    } catch (error) {
      this.logger.error('Failed to store deployment assessment', 'DeploymentMemoryManager', error);
      throw error;
    }
  }
  /**
   * Migrate existing deployment history to memory entities
   */
  async migrateExistingHistory(historyPath) {
    try {
      if (!existsSync(historyPath)) {
        this.logger.info('No existing deployment history found to migrate', 'DeploymentMemoryManager');
        return;
      }
      const historyData = JSON.parse(readFileSync(historyPath, 'utf8'));
      const deployments = historyData.deployments || [];
      let migratedCount = 0;
      for (const deployment of deployments) {
        try {
          await this.migrateDeploymentRecord(deployment);
          migratedCount++;
        } catch (error) {
          this.logger.error(`Failed to migrate deployment ${deployment.deploymentId}`, 'DeploymentMemoryManager', error);
        }
      }
      this.logger.info(`Migration completed: ${migratedCount}/${deployments.length} deployments migrated`, 'DeploymentMemoryManager');
    } catch (error) {
      this.logger.error('Failed to migrate deployment history', 'DeploymentMemoryManager', error);
      throw error;
    }
  }
  /**
   * Analyze deployment patterns across memory entities
   */
  async analyzeDeploymentPatterns(environment) {
    try {
      const query = {
        entityTypes: ['deployment_assessment'],
        limit: 100,
        sortBy: 'lastModified',
      };
      if (environment) {
        query.tags = [environment.toLowerCase()];
      }
      const assessments = await this.memoryManager.queryEntities(query);
      const patterns = this.detectDeploymentPatterns(assessments.entities);
      const trends = this.calculateDeploymentTrends(assessments.entities);
      const recommendations = this.generatePatternRecommendations(patterns, trends);
      const riskFactors = this.identifyRiskFactors(assessments.entities);
      return {
        patterns,
        trends,
        recommendations,
        riskFactors,
      };
    } catch (error) {
      this.logger.error('Failed to analyze deployment patterns', 'DeploymentMemoryManager', error);
      throw error;
    }
  }
  /**
   * Compare current assessment with historical patterns
   */
  async compareWithHistory(currentAssessment, environment) {
    try {
      const recentAssessments = await this.memoryManager.queryEntities({
        entityTypes: ['deployment_assessment'],
        tags: [environment.toLowerCase()],
        limit: 10,
        sortBy: 'lastModified',
      });
      if (recentAssessments.entities.length === 0) {
        return {
          isImprovement: true,
          comparison: { type: 'first_assessment' },
          insights: ['This is the first deployment assessment for this environment'],
        };
      }
      const lastAssessment = recentAssessments.entities[0];
      const comparison = this.compareAssessments(currentAssessment, lastAssessment.assessmentData);
      return {
        isImprovement: comparison.scoreImprovement > 0,
        comparison,
        insights: this.generateComparisonInsights(comparison),
      };
    } catch (error) {
      this.logger.error('Failed to compare with history', 'DeploymentMemoryManager', error);
      return {
        isImprovement: false,
        comparison: { type: 'comparison_failed' },
        insights: ['Unable to compare with historical data'],
      };
    }
  }
  // Private helper methods
  async migrateDeploymentRecord(deployment) {
    const assessmentData = {
      environment: deployment.environment,
      readinessScore: deployment.status === 'success' ? 1.0 : 0.0, // Use 0-1 range
      validationResults: {
        testResults: deployment.testResults
          ? {
              passed: deployment.testResults.testSuitesExecuted.reduce(
                (sum, suite) => sum + suite.passedTests,
                0
              ),
              failed: deployment.testResults.failureCount,
              coverage: deployment.testResults.coveragePercentage / 100,
              criticalFailures: deployment.testResults.criticalTestFailures.map(f => f.testName),
            }
          : {
              passed: 0,
              failed: 0,
              coverage: 0,
              criticalFailures: [],
            },
        securityValidation: {
          vulnerabilities: 0,
          securityScore: 0.8,
          criticalIssues: [],
        },
        performanceValidation: {
          performanceScore: deployment.status === 'success' ? 0.8 : 0.2,
          bottlenecks: [],
          resourceUtilization: {},
        },
      },
      blockingIssues: deployment.failureReason
        ? [
            {
              issue: `Historical Deployment Failure: ${deployment.failureReason}`,
              severity: 'high',
              category: 'configuration',
              resolution: 'Review and address historical failure causes',
            },
          ]
        : [],
      deploymentStrategy: {
        type: 'rolling',
        rollbackPlan: 'Standard rollback procedure',
        monitoringPlan: 'Basic monitoring',
        estimatedDowntime: deployment.rollbackRequired ? 'Variable' : '0 minutes',
      },
      complianceChecks: {
        adrCompliance: 1.0,
        regulatoryCompliance: [],
        auditTrail: [
          `Migrated deployment record from ${deployment.timestamp}`,
          `Original status: ${deployment.status}`,
          `Rollback required: ${deployment.rollbackRequired}`,
        ],
      },
    };
    await this.memoryManager.upsertEntity({
      type: 'deployment_assessment',
      title: `Historical Deployment: ${deployment.environment} - ${deployment.status.toUpperCase()} - ${deployment.timestamp.split('T')[0]}`,
      description: `Migrated deployment record for ${deployment.environment} (ID: ${deployment.deploymentId})`,
      tags: [
        'deployment',
        deployment.environment.toLowerCase(),
        'migrated-record',
        deployment.status,
        ...(deployment.rollbackRequired ? ['rollback-required'] : []),
      ],
      assessmentData,
      relationships: [],
      context: {
        projectPhase: 'deployment-execution',
        technicalStack: [],
        environmentalFactors: [deployment.environment],
        stakeholders: ['deployment-team'],
      },
      accessPattern: {
        lastAccessed: new Date().toISOString(),
        accessCount: 1,
        accessContext: ['migration'],
      },
      evolution: {
        origin: 'imported',
        transformations: [
          {
            timestamp: new Date().toISOString(),
            type: 'migration',
            description: `Migrated from deployment-history.json (original: ${deployment.timestamp})`,
            agent: 'deployment-readiness-tool',
          },
        ],
      },
      validation: {
        isVerified: true,
        verificationMethod: 'historical-migration',
        verificationTimestamp: new Date().toISOString(),
      },
    });
  }
  extractTechnicalStack(_validationResults) {
    // Extract technical stack from validation results
    // This is a simplified implementation
    return [];
  }
  detectDeploymentPatterns(assessments) {
    // Analyze deployment patterns across assessments
    const patterns = [];
    // Pattern: Time-based failures
    const timePatterns = this.analyzeTimePatterns(assessments);
    if (timePatterns.length > 0) {
      patterns.push({ type: 'time_based', patterns: timePatterns });
    }
    // Pattern: Environment-specific issues
    const envPatterns = this.analyzeEnvironmentPatterns(assessments);
    if (envPatterns.length > 0) {
      patterns.push({ type: 'environment_specific', patterns: envPatterns });
    }
    return patterns;
  }
  calculateDeploymentTrends(assessments) {
    if (assessments.length < 3) return [];
    const trends = [];
    const scores = assessments.map((a) => a.assessmentData.readinessScore);
    // Calculate score trend
    const scoreTrend = this.calculateTrend(scores);
    trends.push({
      metric: 'readiness_score',
      trend: scoreTrend > 0 ? 'improving' : scoreTrend < 0 ? 'declining' : 'stable',
      change: scoreTrend,
    });
    return trends;
  }
  generatePatternRecommendations(patterns, trends) {
    const recommendations = [];
    // Generate recommendations based on patterns
    patterns.forEach(pattern => {
      if (pattern.type === 'time_based') {
        recommendations.push('Consider scheduling deployments during low-risk time windows');
      }
      if (pattern.type === 'environment_specific') {
        recommendations.push('Address environment-specific configuration issues');
      }
    });
    // Generate recommendations based on trends
    trends.forEach(trend => {
      if (trend.metric === 'readiness_score' && trend.trend === 'declining') {
        recommendations.push('Investigate causes of declining deployment readiness scores');
      }
    });
    return recommendations;
  }
  /**
   * Map deployment blocker category to schema-compliant category
   */
  mapBlockerCategory(category) {
    switch (category) {
      case 'test_failure':
        return 'test';
      case 'adr_compliance':
      case 'environment':
      case 'deployment_history':
        return 'configuration';
      case 'code_quality':
        return 'performance';
      default:
        return 'configuration';
    }
  }
  identifyRiskFactors(assessments) {
    const riskFactors = [];
    // Analyze recent failures
    const recentFailures = assessments
      .filter((a) => !a.assessmentData.deploymentReady)
      .slice(0, 5);
    if (recentFailures.length >= 3) {
      riskFactors.push({
        factor: 'frequent_failures',
        description: `${recentFailures.length} deployment blocks in recent assessments`,
        severity: 'high',
      });
    }
    return riskFactors;
  }
  compareAssessments(current, historical) {
    return {
      scoreImprovement: current.overallScore - historical.readinessScore,
      confidenceChange: current.confidence - historical.confidence,
      blockingIssuesChange: current.criticalBlockers.length - historical.blockingIssues.length,
      testImprovements: {
        failureCountChange:
          current.testValidationResult.failureCount -
          (historical.validationResults?.testValidation?.failureCount || 0),
        coverageChange:
          current.testValidationResult.coveragePercentage -
          (historical.validationResults?.testValidation?.coveragePercentage || 0),
      },
    };
  }
  generateComparisonInsights(comparison) {
    const insights = [];
    if (comparison.scoreImprovement > 0) {
      insights.push(`Deployment readiness improved by ${comparison.scoreImprovement} points`);
    } else if (comparison.scoreImprovement < 0) {
      insights.push(`Deployment readiness declined by ${Math.abs(comparison.scoreImprovement)} points`);
    }
    if (comparison.testImprovements.failureCountChange < 0) {
      insights.push(`Test stability improved: ${Math.abs(comparison.testImprovements.failureCountChange)} fewer failures`);
    }
    if (comparison.testImprovements.coverageChange > 0) {
      insights.push(`Test coverage increased by ${comparison.testImprovements.coverageChange}%`);
    }
    return insights;
  }
  analyzeTimePatterns(_assessments) {
    // Simplified time pattern analysis
    return [];
  }
  analyzeEnvironmentPatterns(_assessments) {
    // Simplified environment pattern analysis
    return [];
  }
  calculateTrend(values) {
    if (values.length < 2) return 0;
    const recent = values.slice(0, Math.min(5, values.length));
    const older = values.slice(Math.min(5, values.length));
    const recentAvg = recent.reduce((a, b) => a + b, 0) / recent.length;
    const olderAvg = older.length > 0 ? older.reduce((a, b) => a + b, 0) / older.length : recentAvg;
    return recentAvg - olderAvg;
  }
}
/**
 * Main deployment readiness function
 */
export async function deploymentReadiness(args) {
  try {
    const validatedArgs = DeploymentReadinessSchema.parse(args);
    // Initialize paths and cache
    const projectPath = validatedArgs.projectPath || process.cwd();
    const projectName = basename(projectPath);
    const cacheDir = join(os.tmpdir(), projectName, 'cache');
    const deploymentHistoryPath = join(cacheDir, 'deployment-history.json');
    const readinessCachePath = join(cacheDir, 'deployment-readiness-cache.json');
    // Ensure cache directory exists
    if (!existsSync(cacheDir)) {
      mkdirSync(cacheDir, { recursive: true });
    }
    // Initialize memory manager if enabled
    let memoryManager = null;
    if (validatedArgs.enableMemoryIntegration) {
      memoryManager = new DeploymentMemoryManager();
      await memoryManager.initialize();
      // Migrate existing history if requested
      if (validatedArgs.migrateExistingHistory) {
        await memoryManager.migrateExistingHistory(deploymentHistoryPath);
      }
    }
    let result;
    switch (validatedArgs.operation) {
      case 'test_validation':
        result = await performTestValidation(validatedArgs, projectPath);
        break;
      case 'deployment_history':
        result = await performDeploymentHistoryAnalysis(validatedArgs, deploymentHistoryPath);
        break;
      case 'check_readiness':
      case 'validate_production':
      case 'full_audit':
        result = await performFullAudit(validatedArgs, projectPath, deploymentHistoryPath);
        break;
      case 'emergency_override':
        result = await performEmergencyOverride(validatedArgs, projectPath);
        break;
      default:
        throw new McpAdrError('INVALID_ARGS', `Unknown operation: ${validatedArgs.operation}`);
    }
    // Cache result for performance
    writeFileSync(readinessCachePath, JSON.stringify({
      timestamp: new Date().toISOString(),
      operation: validatedArgs.operation,
      result,
    }, null, 2));
    // Memory integration: store assessment and analyze patterns
    let memoryIntegrationInfo = '';
    if (memoryManager) {
      try {
        // Store deployment assessment
        const assessmentId = await memoryManager.storeDeploymentAssessment(validatedArgs.targetEnvironment, result, { projectPath, operation: validatedArgs.operation }, projectPath);
        // Compare with historical patterns
        const historyComparison = await memoryManager.compareWithHistory(result, validatedArgs.targetEnvironment);
        // Analyze deployment patterns
        const patternAnalysis = await memoryManager.analyzeDeploymentPatterns(validatedArgs.targetEnvironment);
        memoryIntegrationInfo = `
## 🧠 Memory Integration Analysis
- **Assessment Stored**: ✅ Deployment assessment saved (ID: ${assessmentId.substring(0, 8)}...)
- **Environment**: ${validatedArgs.targetEnvironment}
- **Historical Comparison**: ${historyComparison.isImprovement ? '📈 Improvement detected' : '📊 Baseline established'}
${historyComparison.insights.length > 0 ? `### Historical Insights
${historyComparison.insights.map(insight => `- ${insight}`).join('\n')}
` : ''}
${patternAnalysis.trends.length > 0 ? `### Deployment Trends
${patternAnalysis.trends.map(trend => `- **${trend.metric}**: ${trend.trend} (${trend.change > 0 ? '+' : ''}${trend.change})`).join('\n')}
` : ''}
${patternAnalysis.recommendations.length > 0 ? `### Pattern-Based Recommendations
${patternAnalysis.recommendations.map(rec => `- ${rec}`).join('\n')}
` : ''}
${patternAnalysis.riskFactors.length > 0 ? `### Risk Factors Identified
${patternAnalysis.riskFactors.map(risk => `- **${risk.factor}**: ${risk.description} (${risk.severity})`).join('\n')}
` : ''}
`;
      } catch (memoryError) {
        memoryIntegrationInfo = `
## 🧠 Memory Integration Status
- **Status**: ⚠️ Memory integration failed - assessment continued without persistence
- **Error**: ${memoryError instanceof Error ? memoryError.message : 'Unknown error'}
`;
      }
    }
    // Generate enhanced response with memory integration
    const baseResponse = generateDeploymentReadinessResponse(result, validatedArgs);
    // Add memory integration info if available
    if (memoryIntegrationInfo && baseResponse.content?.[0]?.text) {
      baseResponse.content[0].text += memoryIntegrationInfo;
    }
    return baseResponse;
  } catch (error) {
    throw new McpAdrError('DEPLOYMENT_READINESS_ERROR', `Deployment readiness check failed: ${jsonSafeError(error)}`);
  }
}
/**
 * Perform comprehensive test validation
 */
async function performTestValidation(args, projectPath) {
  const testResult = await executeTestSuite(projectPath, args.testSuiteRequired);
  const testBlockers = [];
  // Check for test failures
  if (testResult.failureCount > args.maxTestFailures) {
    testBlockers.push({
      category: 'test_failure',
      title: 'Test Failures Detected',
      description: `${testResult.failureCount} test failures found (max allowed: ${args.maxTestFailures})`,
      severity: 'critical',
      impact: 'Blocks deployment due to failing tests',
      resolutionSteps: [
        'Run npm test to see detailed failures',
        'Fix failing tests one by one',
        'Ensure all tests pass before deployment',
        'Consider increasing test coverage',
      ],
      estimatedResolutionTime: `${Math.ceil(testResult.failureCount * 0.5)} hours`,
      blocksDeployment: args.blockOnFailingTests,
    });
  }
  // Check test coverage
  if (testResult.coveragePercentage < args.requireTestCoverage) {
    testBlockers.push({
      category: 'test_failure',
      title: 'Insufficient Test Coverage',
      description: `Test coverage is ${testResult.coveragePercentage}% (minimum required: ${args.requireTestCoverage}%)`,
      severity: 'high',
      impact: 'May indicate untested code paths',
      resolutionSteps: [
        'Add tests for uncovered code',
        'Run npm run test:coverage to see detailed coverage',
        'Focus on critical business logic first',
      ],
      estimatedResolutionTime: '2-4 hours',
      blocksDeployment: args.strictMode,
    });
  }
  // Basic result structure
  return {
    isDeploymentReady: testBlockers.length === 0,
    overallScore: calculateTestScore(testResult, args),
    confidence: 85,
    codeQualityAnalysis: await analyzeCodeQualityWithTreeSitter(args.projectPath || process.cwd(), args.enableTreeSitterAnalysis, args.treeSitterLanguages),
    testValidationResult: testResult,
    deploymentHistoryAnalysis: {
      recentDeployments: [],
      successRate: 100,
      rollbackRate: 0,
      averageDeploymentTime: 0,
      failurePatterns: [],
      environmentStability: {
        stabilityScore: 100,
        riskLevel: 'low',
        recommendation: 'Proceed with deployment',
      },
      recommendedAction: 'proceed',
    },
    adrComplianceResult: {
      score: 100,
      compliantAdrs: 0,
      totalAdrs: 0,
      missingImplementations: [],
      recommendations: [],
    },
    criticalBlockers: testBlockers.filter(b => b.severity === 'critical'),
    testFailureBlockers: testBlockers,
    deploymentHistoryBlockers: [],
    warnings: [],
    todoTasksCreated: [],
    healthScoreUpdate: {},
    gitPushStatus: testBlockers.length === 0 ? 'allowed' : 'blocked',
    overrideStatus: {},
  };
}
/**
 * Execute test suite and analyze results
 */
async function executeTestSuite(projectPath, _requiredSuites) {
  const startTime = Date.now();
  let testOutput = '';
  let exitCode = 0;
  try {
    // Try to run tests with different commands
    const testCommands = ['npm test', 'yarn test', 'npx jest'];
    for (const command of testCommands) {
      try {
        testOutput = execSync(command, {
          cwd: projectPath,
          encoding: 'utf8',
          timeout: 300000, // 5 minute timeout
        });
        break;
      } catch (error) {
        if (error.status !== undefined) {
          // Command executed but tests failed
          testOutput = error.stdout + error.stderr;
          exitCode = error.status;
          break;
        }
        // Command not found, try next
      }
    }
  } catch (error) {
    testOutput = `Test execution failed: ${error}`;
    exitCode = 1;
  }
  const executionTime = Date.now() - startTime;
  // Parse test results (simplified for now)
  const testSuites = parseTestOutput(testOutput);
  const totalFailures = testSuites.reduce((sum, suite) => sum + suite.failedTests, 0);
  const overallStatus = exitCode === 0 ? 'passed' : totalFailures > 0 ? 'failed' : 'partial';
  // Check coverage (simplified)
  const coverage = await checkTestCoverage(projectPath);
  return {
    testSuitesExecuted: testSuites,
    overallTestStatus: overallStatus,
    failureCount: totalFailures,
    coveragePercentage: coverage,
    requiredSuitesMissing: [],
    criticalTestFailures: testSuites.flatMap(suite => suite.failureDetails.filter(f => f.severity === 'critical')),
    testExecutionTime: executionTime,
    lastTestRun: new Date().toISOString(),
  };
}
/**
 * Parse test output to extract results
 */
function parseTestOutput(output) {
  // Simplified parser - in production, would handle Jest, Mocha, etc.
  const lines = output.split('\n');
  const suites = [];
  // Look for Jest-style output
  let currentSuite = {};
  for (const line of lines) {
    if (line.includes('PASS') || line.includes('FAIL')) {
      if (currentSuite.suiteName) {
        suites.push(currentSuite);
      }
      currentSuite = {
        suiteName: line.split(' ').pop() || 'unknown',
        status: line.includes('PASS') ? 'passed' : 'failed',
        passedTests: 0,
        failedTests: 0,
        coverage: 0,
        executionTime: 0,
        failureDetails: [],
      };
    }
    // Count tests
    if (line.includes('✓') || line.includes('✗')) {
      if (line.includes('✓')) {
        currentSuite.passedTests = (currentSuite.passedTests || 0) + 1;
      } else {
        currentSuite.failedTests = (currentSuite.failedTests || 0) + 1;
        // Add failure detail
        currentSuite.failureDetails?.push({
          testName: line.trim(),
          testSuite: currentSuite.suiteName || 'unknown',
          errorMessage: line,
          severity: 'medium',
          blocksDeployment: true,
          relatedFiles: [],
        });
      }
    }
  }
  if (currentSuite.suiteName) {
    suites.push(currentSuite);
  }
  return suites.length > 0
    ? suites
    : [
        {
          suiteName: 'default',
          status: output.includes('failing') ? 'failed' : 'passed',
          passedTests: 0,
          failedTests: output.includes('failing') ? 1 : 0,
          coverage: 0,
          executionTime: 0,
          failureDetails: [],
        },
      ];
}
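/*
 * Parsing sketch (illustrative): what the simplified parser above returns for
 * Jest-style output. The sample output lines are hypothetical; the shape of the
 * suite object follows the fields assigned in parseTestOutput().
 *
 * @example
 * const suites = parseTestOutput('PASS src/example.test.ts\n  ✓ handles success\n  ✗ handles failure');
 * // => [{ suiteName: 'src/example.test.ts', status: 'passed',
 * //       passedTests: 1, failedTests: 1, coverage: 0, executionTime: 0,
 * //       failureDetails: [{ testName: '✗ handles failure', severity: 'medium', ... }] }]
 */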
/**
 * Check test coverage
 */
async function checkTestCoverage(projectPath) {
  try {
    // Try to read coverage from common locations
    const coverageFiles = [
      'coverage/lcov-report/index.html',
      'coverage/coverage-summary.json',
      'coverage/coverage-final.json',
    ];
    for (const file of coverageFiles) {
      const filePath = join(projectPath, file);
      if (existsSync(filePath)) {
        const content = readFileSync(filePath, 'utf8');
        // Extract coverage percentage (simplified)
        const match = content.match(/(\d+(?:\.\d+)?)%/);
        if (match && match[1]) {
          return parseFloat(match[1]);
        }
      }
    }
  } catch {
    // Coverage not available
  }
  return 0; // Default to 0 if coverage cannot be determined
}
/**
 * Perform deployment history analysis
 */
async function performDeploymentHistoryAnalysis(args, historyPath) {
  const history = loadDeploymentHistory(historyPath);
  const analysis = analyzeDeploymentHistory(history, args.targetEnvironment);
  const historyBlockers = [];
  // Check success rate
  if (analysis.successRate < args.deploymentSuccessThreshold) {
    historyBlockers.push({
      category: 'deployment_history',
      title: 'Low Deployment Success Rate',
      description: `Success rate is ${analysis.successRate}% (minimum required: ${args.deploymentSuccessThreshold}%)`,
      severity: 'high',
      impact: 'Indicates potential infrastructure or process issues',
      resolutionSteps: [
        'Review recent deployment failures',
        'Fix underlying infrastructure issues',
        'Improve deployment process reliability',
        'Add more comprehensive pre-deployment checks',
      ],
      estimatedResolutionTime: '1-2 days',
      blocksDeployment: args.blockOnRecentFailures,
    });
  }
  // Check rollback rate
  if (analysis.rollbackRate > args.rollbackFrequencyThreshold) {
    historyBlockers.push({
      category: 'deployment_history',
      title: 'High Rollback Frequency',
      description: `Rollback rate is ${analysis.rollbackRate}% (threshold: ${args.rollbackFrequencyThreshold}%)`,
      severity: 'medium',
      impact: 'May indicate deployment quality issues',
      resolutionSteps: [
        'Improve testing before deployment',
        'Add more validation steps',
        'Review rollback causes',
        'Strengthen deployment pipeline',
      ],
      estimatedResolutionTime: '4-8 hours',
      blocksDeployment: args.strictMode,
    });
  }
  return {
    isDeploymentReady: historyBlockers.length === 0,
    overallScore: Math.min(analysis.successRate, 100 - analysis.rollbackRate),
    confidence: 80,
    codeQualityAnalysis: await analyzeCodeQualityWithTreeSitter(args.projectPath || process.cwd(), args.enableTreeSitterAnalysis, args.treeSitterLanguages),
    testValidationResult: {
      testSuitesExecuted: [],
      overallTestStatus: 'not_run',
      failureCount: 0,
      coveragePercentage: 0,
      requiredSuitesMissing: [],
      criticalTestFailures: [],
      testExecutionTime: 0,
      lastTestRun: '',
    },
    deploymentHistoryAnalysis: analysis,
    adrComplianceResult: {
      score: 100,
      compliantAdrs: 0,
      totalAdrs: 0,
      missingImplementations: [],
      recommendations: [],
    },
    criticalBlockers: historyBlockers.filter(b => b.severity === 'critical'),
    testFailureBlockers: [],
    deploymentHistoryBlockers: historyBlockers,
    warnings: [],
    todoTasksCreated: [],
    healthScoreUpdate: {},
    gitPushStatus: historyBlockers.length === 0 ? 'allowed' : 'blocked',
    overrideStatus: {},
  };
}
/**
 * Load deployment history from cache
 */
function loadDeploymentHistory(historyPath) {
  if (!existsSync(historyPath)) {
    return { deployments: [] };
  }
  try {
    const content = readFileSync(historyPath, 'utf8');
    return JSON.parse(content);
  } catch {
    return { deployments: [] };
  }
}
/**
 * Analyze deployment history patterns
 */
function analyzeDeploymentHistory(history, environment) {
  const recentDeployments = history.deployments
    .filter(d => d.environment === environment)
    .slice(0, 10);
  const successCount = recentDeployments.filter(d => d.status === 'success').length;
  const rollbackCount = recentDeployments.filter(d => d.rollbackRequired).length;
  const successRate = recentDeployments.length > 0 ? (successCount / recentDeployments.length) * 100 : 100;
  const rollbackRate = recentDeployments.length > 0 ? (rollbackCount / recentDeployments.length) * 100 : 0;
  const failurePatterns = analyzeFailurePatterns(recentDeployments.filter(d => d.status === 'failed'));
  return {
    recentDeployments,
    successRate,
    rollbackRate,
    averageDeploymentTime: calculateAverageDeploymentTime(recentDeployments),
    failurePatterns,
    environmentStability: assessEnvironmentStability(successRate, rollbackRate),
    recommendedAction: recommendAction(successRate, rollbackRate, failurePatterns.length),
  };
}
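/*
 * Worked example (illustrative, hypothetical numbers) for the rate arithmetic in
 * analyzeDeploymentHistory above. Given 10 recent deployments in the target
 * environment, 8 successful and 3 requiring rollback:
 *   successRate  = (8 / 10) * 100 = 80  -> not below the default 80% threshold, no blocker
 *   rollbackRate = (3 / 10) * 100 = 30  -> above the default 20% threshold, so
 *                                          performDeploymentHistoryAnalysis raises a
 *                                          'High Rollback Frequency' blocker
 * With no recorded deployments the code falls back to successRate = 100 and rollbackRate = 0.
 */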
/**
 * Analyze failure patterns
 */
function analyzeFailurePatterns(failedDeployments) {
  const patterns = new Map();
  failedDeployments.forEach(deployment => {
    if (deployment.failureReason) {
      const category = categorizeFailure(deployment.failureReason);
      const existing = patterns.get(category);
      if (existing) {
        existing.frequency++;
        existing.lastOccurrence = deployment.timestamp;
      } else {
        patterns.set(category, {
          pattern: category,
          frequency: 1,
          environments: [deployment.environment],
          lastOccurrence: deployment.timestamp,
          resolution: suggestResolution(category),
          preventable: isPreventable(category),
        });
      }
    }
  });
  return Array.from(patterns.values());
}
/**
 * Research environment readiness using research-orchestrator
 */
async function performEnvironmentResearch(args, projectPath) {
  if (!args.enableResearchIntegration) {
    return {
      answer: 'Environment research disabled',
      confidence: 1.0,
      sources: [],
      needsWebSearch: false,
      warnings: [],
    };
  }
  try {
    const orchestrator = new ResearchOrchestrator(projectPath, 'docs/adrs');
    const researchQuestion = `Verify deployment readiness for ${args.targetEnvironment} environment:
1. Are required deployment tools available (Docker/Podman, Kubernetes/OpenShift)?
2. What is the current infrastructure state and health?
3. Are environment configurations present and valid?
4. What deployment patterns are documented in ADRs?
5. Are there any known deployment blockers or issues?`;
    const research = await orchestrator.answerResearchQuestion(researchQuestion);
    const warnings = [];
    // Check confidence level
    if (research.confidence < args.researchConfidenceThreshold) {
      warnings.push(`Research confidence (${(research.confidence * 100).toFixed(1)}%) below threshold (${(args.researchConfidenceThreshold * 100).toFixed(1)}%)`);
    }
    // Check if web search is needed
    if (research.needsWebSearch) {
      warnings.push('Local environment data insufficient - external research may be needed');
    }
    // Check for environment capability availability
    const hasKubernetes = research.sources.some(s => s.type === 'environment' && s.data?.capabilities?.includes('kubernetes'));
    const hasDocker = research.sources.some(s => s.type === 'environment' && s.data?.capabilities?.includes('docker'));
    const hasOpenShift = research.sources.some(s => s.type === 'environment' && s.data?.capabilities?.includes('openshift'));
    const hasPodman = research.sources.some(s => s.type === 'environment' && s.data?.capabilities?.includes('podman'));
    if (!hasKubernetes && !hasOpenShift && !hasDocker && !hasPodman) {
      warnings.push('No container orchestration tools detected - manual deployment verification required');
    }
    return {
      answer: research.answer || 'No environment research results available',
      confidence: research.confidence,
      sources: research.sources.map(s => ({
        type: s.type,
        found: true, // Sources in array are already found
      })),
      needsWebSearch: research.needsWebSearch,
      warnings,
    };
  } catch (error) {
    return {
      answer: `Environment research failed: ${error instanceof Error ? error.message : String(error)}`,
      confidence: 0,
      sources: [],
      needsWebSearch: true,
      warnings: [
        'Failed to perform environment research - proceeding without environment validation',
      ],
    };
  }
}
/**
 * Perform full audit (all checks)
 */
async function performFullAudit(args, projectPath, historyPath) {
  // Step 0: Research environment readiness
  const environmentResearch = await performEnvironmentResearch(args, projectPath);
  // Combine all validations
  const testResult = await performTestValidation(args, projectPath);
  const historyResult = await performDeploymentHistoryAnalysis(args, historyPath);
  // Smart Code Linking - Enhanced deployment readiness with ADR analysis
  let smartCodeAnalysis = '';
  let adrComplianceResult = testResult.adrComplianceResult;
  if (args.requireAdrCompliance) {
    try {
      // Discover ADRs in the project
      const { discoverAdrsInDirectory } = await import('../utils/adr-discovery.js');
      const adrDirectory = 'docs/adrs';
      const discoveryResu