ai-debug-local-mcp — code-quality-integration-handler.js

🎯 ENHANCED AI GUIDANCE v4.1.2: Dramatically improved tool descriptions help AI users choose the right tools instead of 'close enough' options. Ultra-fast keyboard automation (10x speed), universal recording, multi-ecosystem debugging support, and compreh…

/**
 * Code Quality Integration Handler
 * Implements file change detection, visual validation, and refactoring safety net
 * Based on user feedback for systematic development cycle enhancement
 */
import { FileChangeMonitor } from '../utils/file-change-monitor.js';
import { VisualValidationEngine } from '../utils/visual-validation-engine.js';
import { RefactoringSafetyNet } from '../utils/refactoring-safety-net.js';

export class CodeQualityIntegrationHandler {
    tools = [
        {
            name: 'start_file_change_monitoring',
            description: 'Start monitoring file changes during development with automatic screenshot capture',
            inputSchema: {
                type: 'object',
                properties: {
                    projectPath: { type: 'string', description: 'Root path of the project to monitor' },
                    filePatterns: {
                        type: 'array',
                        items: { type: 'string' },
                        description: 'File patterns to monitor (e.g., ["*.ts", "*.tsx", "*.js", "*.jsx"])',
                        default: ['*.ts', '*.tsx', '*.js', '*.jsx', '*.vue', '*.svelte']
                    },
                    screenshotOnChange: { type: 'boolean', description: 'Automatically capture screenshots when files change', default: true },
                    sessionId: { type: 'string', description: 'Active debugging session ID for screenshot capture' }
                },
                required: ['projectPath', 'sessionId']
            }
        },
        {
            name: 'stop_file_change_monitoring',
            description: 'Stop file change monitoring and generate change report',
            inputSchema: {
                type: 'object',
                properties: {
                    generateReport: { type: 'boolean', description: 'Generate a detailed report of all changes detected', default: true }
                }
            }
        },
        {
            name: 'capture_refactoring_baseline',
            description: 'Capture baseline screenshots and state before major refactoring',
            inputSchema: {
                type: 'object',
                properties: {
                    sessionId: { type: 'string', description: 'Active debugging session ID' },
                    refactoringName: { type: 'string', description: 'Name/description of the refactoring being performed' },
                    criticalPages: { type: 'array', items: { type: 'string' }, description: 'URLs of critical pages to capture for comparison' },
                    includeState: { type: 'boolean', description: 'Include application state in baseline capture', default: true }
                },
                required: ['sessionId', 'refactoringName']
            }
        },
        {
            name: 'validate_after_refactoring',
            description: 'Compare current state with baseline after refactoring completion',
            inputSchema: {
                type: 'object',
                properties: {
                    sessionId: { type: 'string', description: 'Active debugging session ID' },
                    baselineId: { type: 'string', description: 'ID of the baseline to compare against' },
                    toleranceLevel: { type: 'string', enum: ['strict', 'moderate', 'loose'], description: 'Visual comparison tolerance level', default: 'moderate' }
                },
                required: ['sessionId', 'baselineId']
            }
        },
        {
            name: 'setup_visual_test_integration',
            description: 'Integrate with test runners for automatic visual validation during tests',
            inputSchema: {
                type: 'object',
                properties: {
                    testFramework: { type: 'string', enum: ['jest', 'cypress', 'playwright', 'vitest'], description: 'Test framework to integrate with' },
                    testPattern: { type: 'string', description: 'Test file pattern to watch', default: '**/*.test.{js,ts,jsx,tsx}' },
                    screenshotOnFailure: { type: 'boolean', description: 'Capture screenshots when tests fail', default: true },
                    sessionId: { type: 'string', description: 'Active debugging session ID' }
                },
                required: ['testFramework', 'sessionId']
            }
        },
        {
            name: 'analyze_ui_stability',
            description: 'Analyze UI stability across file changes and provide stability score',
            inputSchema: {
                type: 'object',
                properties: {
                    timeRange: { type: 'string', description: 'Time range to analyze (e.g., "1h", "24h", "7d")', default: '1h' },
                    includePerformance: { type: 'boolean', description: 'Include performance metrics in stability analysis', default: true }
                }
            }
        },
        {
            name: 'set_code_quality_gates',
            description: 'Configure automated quality gates for file size limits and test coverage',
            inputSchema: {
                type: 'object',
                properties: {
                    maxFileSize: { type: 'number', description: 'Maximum file size in lines (default: 300)', default: 300 },
                    minTestCoverage: { type: 'number', description: 'Minimum test coverage percentage', default: 80 },
                    enableSizeWarnings: { type: 'boolean', description: 'Show warnings when files exceed size limits', default: true },
                    blockOnFailure: { type: 'boolean', description: 'Block operations when quality gates fail', default: false }
                }
            }
        },
        {
            name: 'get_change_impact_analysis',
            description: 'Analyze the impact of recent file changes on UI and functionality',
            inputSchema: {
                type: 'object',
                properties: {
                    changeId: { type: 'string', description: 'Specific change ID to analyze, or latest if not provided' },
                    includeVisualDiff: { type: 'boolean', description: 'Include visual differences in the analysis', default: true },
                    includePerformanceImpact: { type: 'boolean', description: 'Include performance impact analysis', default: true }
                }
            }
        }
    ];
    fileChangeMonitor;
    visualValidationEngine;
    refactoringSafetyNet;

    constructor() {
        this.fileChangeMonitor = new FileChangeMonitor();
        this.visualValidationEngine = new VisualValidationEngine();
        this.refactoringSafetyNet = new RefactoringSafetyNet();
    }

    async handle(toolName, args, sessions) {
        try {
            switch (toolName) {
                case 'start_file_change_monitoring':
                    return await this.startFileChangeMonitoring(args, sessions);
                case 'stop_file_change_monitoring':
                    return await this.stopFileChangeMonitoring(args);
                case 'capture_refactoring_baseline':
                    return await this.captureRefactoringBaseline(args, sessions);
                case 'validate_after_refactoring':
                    return await this.validateAfterRefactoring(args, sessions);
                case 'setup_visual_test_integration':
                    return await this.setupVisualTestIntegration(args, sessions);
                case 'analyze_ui_stability':
                    return await this.analyzeUIStability(args);
                case 'set_code_quality_gates':
                    return await this.setCodeQualityGates(args);
                case 'get_change_impact_analysis':
                    return await this.getChangeImpactAnalysis(args);
                default:
                    throw new Error(`Unknown tool: ${toolName}`);
            }
        }
        catch (error) {
            return {
                content: [{
                    type: 'text',
                    text: `Error in Code Quality Integration: ${error instanceof Error ? error.message : String(error)}`
                }]
            };
        }
    }

    /**
     * Start monitoring file changes with automatic screenshot capture
     */
    async startFileChangeMonitoring(args, sessions) {
        const { projectPath, filePatterns, screenshotOnChange, sessionId } = args;
        const session = sessions.get(sessionId);
        if (!session) {
            return {
                content: [{
                    type: 'text',
                    text: '❌ No active debugging session found. Please start a session with inject_debugging first.'
                }]
            };
        }
        const monitoringConfig = await this.fileChangeMonitor.startMonitoring({
            projectPath,
            filePatterns: filePatterns || ['*.ts', '*.tsx', '*.js', '*.jsx', '*.vue', '*.svelte'],
            screenshotOnChange,
            sessionId,
            session
        });
        return {
            content: [{
                type: 'text',
                text: `🔍 **File Change Monitoring Started**

📁 **Project Path**: ${projectPath}
📋 **File Patterns**: ${filePatterns?.join(', ') || 'Default patterns'}
📸 **Screenshot on Change**: ${screenshotOnChange ? 'Enabled' : 'Disabled'}
🆔 **Monitor ID**: ${monitoringConfig.monitorId}

✅ Monitoring ${monitoringConfig.watchedFiles} files for changes.
📊 All changes will be logged with timestamps and impact analysis.
${screenshotOnChange ? '📷 Screenshots will be automatically captured when files change.' : ''}

**Next Steps:**
1. Make code changes as part of your development workflow
2. Screenshots and change logs will be captured automatically
3. Use \`stop_file_change_monitoring\` to get a comprehensive report`
            }]
        };
    }

    /**
     * Stop file change monitoring and generate report
     */
    async stopFileChangeMonitoring(args) {
        const { generateReport } = args;
        const report = await this.fileChangeMonitor.stopMonitoring(generateReport);
        if (!report) {
            return {
                content: [{
                    type: 'text',
                    text: '⚠️ No active file monitoring session found.'
                }]
            };
        }
        let responseText = `🏁 **File Change Monitoring Stopped**

📊 **Session Summary**:
- **Duration**: ${report.duration}
- **Files Changed**: ${report.changedFiles.length}
- **Total Changes**: ${report.totalChanges}
- **Screenshots Captured**: ${report.screenshotsCaptured}

`;
        if (report.changedFiles.length > 0) {
            responseText += `📁 **Changed Files**:\n`;
            report.changedFiles.forEach((file) => {
                responseText += `- \`${file.path}\` (${file.changeCount} changes)\n`;
            });
        }
        if (report.uiStabilityScore !== undefined) {
            responseText += `\n🎯 **UI Stability Score**: ${report.uiStabilityScore}/100\n`;
            if (report.uiStabilityScore < 80) {
                responseText += `⚠️ UI stability below recommended threshold. Consider reviewing recent changes.\n`;
            }
        }
        if (generateReport && report.detailedReport) {
            responseText += `\n📋 **Detailed Report**: Available at ${report.detailedReport}`;
        }
        return {
            content: [{
                type: 'text',
                text: responseText
            }]
        };
    }

    /**
     * Capture baseline before refactoring
     */
    async captureRefactoringBaseline(args, sessions) {
        const { sessionId, refactoringName, criticalPages, includeState } = args;
        const session = sessions.get(sessionId);
        if (!session) {
            return {
                content: [{
                    type: 'text',
                    text: '❌ No active debugging session found. Please start a session with inject_debugging first.'
                }]
            };
        }
        const baseline = await this.refactoringSafetyNet.captureBaseline({
            sessionId,
            refactoringName,
            criticalPages: criticalPages || [],
            includeState,
            session
        });
        return {
            content: [{
                type: 'text',
                text: `📸 **Refactoring Baseline Captured**

🏷️ **Refactoring**: ${refactoringName}
🆔 **Baseline ID**: ${baseline.baselineId}
📅 **Captured At**: ${new Date(baseline.timestamp).toLocaleString()}

📊 **Captured Data**:
- **Screenshots**: ${baseline.screenshots.length} pages
- **Application State**: ${includeState ? 'Included' : 'Not included'}
- **Performance Metrics**: ${baseline.performanceMetrics ? 'Captured' : 'Not captured'}

${criticalPages && criticalPages.length > 0 ? `📍 **Critical Pages Captured**:\n${criticalPages.map((page) => `- ${page}`).join('\n')}` : ''}

✅ **Ready for Refactoring**: You can now safely make your changes.

🔍 **After refactoring**, use \`validate_after_refactoring\` with baseline ID: \`${baseline.baselineId}\``
            }]
        };
    }

    /**
     * Validate changes after refactoring
     */
    async validateAfterRefactoring(args, sessions) {
        const { sessionId, baselineId, toleranceLevel } = args;
        const session = sessions.get(sessionId);
        if (!session) {
            return {
                content: [{
                    type: 'text',
                    text: '❌ No active debugging session found. Please start a session with inject_debugging first.'
                }]
            };
        }
        const validation = await this.refactoringSafetyNet.validateAfterRefactoring({
            sessionId,
            baselineId,
            toleranceLevel: toleranceLevel || 'moderate',
            session
        });
        if (!validation.baseline) {
            return {
                content: [{
                    type: 'text',
                    text: `❌ Baseline with ID '${baselineId}' not found. Please check the baseline ID.`
                }]
            };
        }
        let responseText = `🔍 **Refactoring Validation Results**

🏷️ **Refactoring**: ${validation.baseline.refactoringName}
🆔 **Baseline ID**: ${baselineId}
📊 **Overall Score**: ${validation.overallScore}/100

`;
        // Visual comparison results
        if (validation.visualComparison.length > 0) {
            responseText += `📸 **Visual Comparison Results**:\n`;
            validation.visualComparison.forEach((result) => {
                const status = result.similarity > 90 ? '✅' : result.similarity > 75 ? '⚠️' : '❌';
                responseText += `${status} ${result.page}: ${result.similarity}% similarity\n`;
            });
        }
        // Performance impact
        if (validation.performanceImpact) {
            const impact = validation.performanceImpact;
            responseText += `\n⚡ **Performance Impact**:\n`;
            responseText += `- Load Time: ${impact.loadTimeChange > 0 ? '+' : ''}${impact.loadTimeChange}ms\n`;
            responseText += `- Memory Usage: ${impact.memoryChange > 0 ? '+' : ''}${impact.memoryChange}MB\n`;
        }
        // Issues found
        if (validation.issuesFound.length > 0) {
            responseText += `\n🚨 **Issues Found**:\n`;
            validation.issuesFound.forEach((issue) => {
                responseText += `- ${issue.severity.toUpperCase()}: ${issue.description}\n`;
            });
        }
        // Recommendations
        if (validation.recommendations.length > 0) {
            responseText += `\n💡 **Recommendations**:\n`;
            validation.recommendations.forEach((rec) => {
                responseText += `- ${rec}\n`;
            });
        }
        const overallStatus = validation.overallScore >= 90 ? '🎉 Refactoring successful!' :
            validation.overallScore >= 75 ? '⚠️ Refactoring mostly successful with minor issues' :
                '🚨 Refactoring has significant issues that need attention';
        responseText += `\n${overallStatus}`;
        return {
            content: [{
                type: 'text',
                text: responseText
            }]
        };
    }

    /**
     * Setup visual test integration
     */
    async setupVisualTestIntegration(args, sessions) {
        const { testFramework, testPattern, screenshotOnFailure, sessionId } = args;
        const integration = await this.visualValidationEngine.setupTestIntegration({
            testFramework,
            testPattern,
            screenshotOnFailure,
            sessionId
        });
        return {
            content: [{
                type: 'text',
                text: `🧪 **Visual Test Integration Setup Complete**

🔧 **Test Framework**: ${testFramework}
📋 **Test Pattern**: ${testPattern}
📸 **Screenshot on Failure**: ${screenshotOnFailure ? 'Enabled' : 'Disabled'}
🆔 **Integration ID**: ${integration.integrationId}

✅ **Configuration Applied**:
- Test watcher started for pattern: \`${testPattern}\`
- Visual validation hooks installed
- Screenshot capture ${screenshotOnFailure ? 'enabled' : 'disabled'} on test failures

📊 **Next Steps**:
1. Run your tests normally with \`${testFramework === 'jest' ? 'npm test' : testFramework === 'cypress' ? 'npx cypress run' : 'npx playwright test'}\`
2. Visual validation will run automatically during tests
3. Screenshots will be captured for failed tests (if enabled)
4. Use \`analyze_ui_stability\` to review test results and UI changes`
            }]
        };
    }

    /**
     * Analyze UI stability
     */
    async analyzeUIStability(args) {
        const { timeRange, includePerformance } = args;
        const analysis = await this.visualValidationEngine.analyzeUIStability({
            timeRange: timeRange || '1h',
            includePerformance
        });
        let responseText = `📊 **UI Stability Analysis** (${timeRange || '1h'})

🎯 **Overall Stability Score**: ${analysis.stabilityScore}/100

📈 **Metrics**:
- **Screenshots Analyzed**: ${analysis.screenshotsAnalyzed}
- **Visual Changes Detected**: ${analysis.visualChanges}
- **Critical Issues**: ${analysis.criticalIssues}
- **Minor Issues**: ${analysis.minorIssues}

`;
        if (includePerformance && analysis.performanceMetrics) {
            responseText += `⚡ **Performance Metrics**:
- **Average Load Time**: ${analysis.performanceMetrics.avgLoadTime}ms
- **Performance Trend**: ${analysis.performanceMetrics.trend}
- **Memory Usage**: ${analysis.performanceMetrics.avgMemoryUsage}MB

`;
        }
        if (analysis.recommendations.length > 0) {
            responseText += `💡 **Recommendations**:\n`;
            analysis.recommendations.forEach((rec) => {
                responseText += `- ${rec}\n`;
            });
        }
        const stabilityStatus = analysis.stabilityScore >= 90 ? '🟢 Excellent' :
            analysis.stabilityScore >= 75 ? '🟡 Good' :
                analysis.stabilityScore >= 60 ? '🟠 Fair' : '🔴 Poor';
        responseText += `\n**UI Stability Status**: ${stabilityStatus}`;
        return {
            content: [{
                type: 'text',
                text: responseText
            }]
        };
    }

    /**
     * Set code quality gates
     */
    async setCodeQualityGates(args) {
        const { maxFileSize, minTestCoverage, enableSizeWarnings, blockOnFailure } = args;
        // This would integrate with your existing file size and test coverage monitoring
        const gates = {
            maxFileSize: maxFileSize || 300,
            minTestCoverage: minTestCoverage || 80,
            enableSizeWarnings: enableSizeWarnings !== false,
            blockOnFailure: blockOnFailure || false
        };
        return {
            content: [{
                type: 'text',
                text: `⚙️ **Code Quality Gates Configured**

📏 **File Size Limit**: ${gates.maxFileSize} lines
📊 **Minimum Test Coverage**: ${gates.minTestCoverage}%
⚠️ **Size Warnings**: ${gates.enableSizeWarnings ? 'Enabled' : 'Disabled'}
🚫 **Block on Failure**: ${gates.blockOnFailure ? 'Enabled' : 'Disabled'}

✅ **Quality Gates Active**: Your development workflow will now enforce these quality standards.

**Integration Points**:
- File changes will be checked against size limits
- Test coverage will be monitored during development
- Visual validation will include quality gate checks
- ${gates.blockOnFailure ? 'Operations will be blocked if gates fail' : 'Warnings will be shown for gate failures'}`
            }]
        };
    }

    /**
     * Get change impact analysis
     */
    async getChangeImpactAnalysis(args) {
        const { changeId, includeVisualDiff, includePerformanceImpact } = args;
        const analysis = await this.fileChangeMonitor.getChangeImpactAnalysis({
            changeId,
            includeVisualDiff,
            includePerformanceImpact
        });
        if (!analysis.change) {
            return {
                content: [{
                    type: 'text',
                    text: `❌ No change found${changeId ? ` with ID '${changeId}'` : ' (no recent changes detected)'}.`
                }]
            };
        }
        let responseText = `🔍 **Change Impact Analysis**

📅 **Change Time**: ${new Date(analysis.change.timestamp).toLocaleString()}
📝 **Files Modified**: ${analysis.change.files?.length || 1}
🔧 **Change Type**: ${analysis.change.type}

📊 **Impact Summary**:
- **UI Impact**: ${analysis.uiImpact}
- **Functionality Impact**: ${analysis.functionalityImpact}
- **Performance Impact**: ${analysis.performanceImpact}

`;
        if (analysis.change.files && analysis.change.files.length > 0) {
            responseText += `📝 **Modified Files**:\n`;
            analysis.change.files.forEach((file) => {
                responseText += `- \`${file.path}\` (${file.linesChanged} lines)\n`;
            });
        }
        else {
            responseText += `📝 **Modified File**: \`${analysis.change.path}\` (${analysis.change.linesChanged || 0} lines)\n`;
        }
        if (includeVisualDiff && analysis.visualDiff) {
            responseText += `\n📸 **Visual Changes**:
- **Screenshots Compared**: ${analysis.visualDiff.screenshotsCompared}
- **Visual Similarity**: ${analysis.visualDiff.similarity}%
- **Changes Detected**: ${analysis.visualDiff.changesDetected}

`;
        }
        if (includePerformanceImpact && analysis.performanceAnalysis) {
            responseText += `⚡ **Performance Analysis**:
- **Load Time Change**: ${analysis.performanceAnalysis.loadTimeChange}ms
- **Bundle Size Change**: ${analysis.performanceAnalysis.bundleSizeChange}KB
- **Memory Impact**: ${analysis.performanceAnalysis.memoryImpact}MB

`;
        }
        if (analysis.recommendations.length > 0) {
            responseText += `💡 **Recommendations**:\n`;
            analysis.recommendations.forEach((rec) => {
                responseText += `- ${rec}\n`;
            });
        }
        return {
            content: [{
                type: 'text',
                text: responseText
            }]
        };
    }
}
export default CodeQualityIntegrationHandler;
//# sourceMappingURL=code-quality-integration-handler.js.map
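
For context, a minimal usage sketch of how this handler might be driven, assuming only what the file above defines: the exported class, its `tools` array, and the `handle(toolName, args, sessions)` signature. The MCP server wiring is omitted, and the import path, the `sessions` map, the `'debug-session-1'` ID, and the project path are hypothetical placeholders, not part of the package's documented API.

// Hypothetical sketch — placeholder names, not the package's official usage.
import CodeQualityIntegrationHandler from './code-quality-integration-handler.js';

const handler = new CodeQualityIntegrationHandler();

// A sessions map is assumed to be maintained elsewhere (e.g. by a debugging-session tool).
const sessions = new Map();
sessions.set('debug-session-1', { /* active debugging session object */ });

// List the tool names this handler exposes (taken from the `tools` array above).
console.log(handler.tools.map((tool) => tool.name));

// Dispatch a tool call the way an MCP server's call-tool handler would.
const result = await handler.handle(
    'start_file_change_monitoring',
    {
        projectPath: '/path/to/project',
        filePatterns: ['*.ts', '*.tsx'],
        screenshotOnChange: true,
        sessionId: 'debug-session-1'
    },
    sessions
);

// Every branch above returns the same shape: { content: [{ type: 'text', text: ... }] }.
console.log(result.content[0].text);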