UNPKG

ai-debug-local-mcp

Version:

🎯 ENHANCED AI GUIDANCE v4.1.2: Dramatically improved tool descriptions help AI users choose the right tools instead of 'close enough' options. Ultra-fast keyboard automation (10x speed), universal recording, multi-ecosystem debugging support, and comprehensive tooling support.

440 lines • 18 kB
import { BaseToolHandler } from './handlers/base-handler.js';
import { DebugSessionRecorder } from './debug-session-recorder.js';
import { TestGeneratorAI } from './test-generator-ai.js';
import { TestReviewerAI } from './test-reviewer-ai.js';

/**
 * MCP tool handler for AI-assisted test generation.
 *
 * Exposes five tools that cover the full lifecycle:
 *   1. start_ai_test_recording    — begin capturing a debug session
 *   2. stop_ai_test_recording     — stop capture, optionally generate tests
 *   3. generate_tests_from_session — produce test code from a recorded session
 *   4. review_generated_tests     — AI quality review of test code
 *   5. approve_ai_tests           — approve/reject/escalate a reviewed test
 *
 * NOTE(review): startAiTestRecording currently attaches the recorder to a
 * mock page object — wiring to the real debug-session page is still a TODO
 * (see the inline comment there).
 */
export class TestGenerationHandler extends BaseToolHandler {
    // sessionId -> DebugSessionRecorder for recordings currently in progress.
    activeRecorders = new Map();
    // Shared default generator/reviewer instances (per-call instances with
    // custom options are created inside individual handlers when needed).
    testGenerator;
    testReviewer;

    /** MCP tool definitions (JSON Schema input contracts) served by this handler. */
    tools = [
        {
            name: 'start_ai_test_recording',
            description: 'Begin recording debugging session with AI test generation enabled',
            inputSchema: {
                type: 'object',
                properties: {
                    sessionId: { type: 'string', description: 'Debug session ID' },
                    testIntent: { type: 'string', description: 'What this test should validate (e.g., "user login flow")' },
                    userStory: { type: 'string', description: 'Optional user story context' },
                    tags: { type: 'array', items: { type: 'string' }, description: 'Optional tags for test categorization' },
                    trustLevel: {
                        type: 'string',
                        enum: ['learning', 'ai_assisted', 'batch_review', 'full_auto'],
                        description: 'Automation level for test approval',
                        default: 'ai_assisted'
                    }
                },
                required: ['sessionId', 'testIntent']
            }
        },
        {
            name: 'stop_ai_test_recording',
            description: 'Stop recording and optionally generate tests immediately',
            inputSchema: {
                type: 'object',
                properties: {
                    sessionId: { type: 'string', description: 'Debug session ID' },
                    generateTests: { type: 'boolean', description: 'Whether to generate tests immediately', default: true },
                    framework: {
                        type: 'string',
                        enum: ['playwright', 'jest', 'cypress'],
                        description: 'Test framework to generate for',
                        default: 'playwright'
                    }
                },
                required: ['sessionId']
            }
        },
        {
            name: 'generate_tests_from_session',
            description: 'Generate test code from a recorded debugging session',
            inputSchema: {
                type: 'object',
                properties: {
                    sessionId: { type: 'string', description: 'Debug session ID or session data' },
                    framework: {
                        type: 'string',
                        enum: ['playwright', 'jest', 'cypress'],
                        description: 'Test framework to generate for',
                        default: 'playwright'
                    },
                    includeVisualAssertions: { type: 'boolean', description: 'Include visual regression assertions', default: false }
                },
                required: ['sessionId']
            }
        },
        {
            name: 'review_generated_tests',
            description: 'AI review of generated test code for quality and best practices',
            inputSchema: {
                type: 'object',
                properties: {
                    testCode: { type: 'string', description: 'The test code to review' },
                    framework: {
                        type: 'string',
                        enum: ['playwright', 'jest', 'cypress'],
                        description: 'Test framework being used'
                    },
                    sessionId: { type: 'string', description: 'Optional session ID for context' },
                    qualityThreshold: {
                        type: 'number',
                        description: 'Quality threshold for auto-approval (0.0-1.0)',
                        default: 0.85,
                        minimum: 0,
                        maximum: 1
                    }
                },
                required: ['testCode', 'framework']
            }
        },
        {
            name: 'approve_ai_tests',
            description: 'Approve or reject AI-generated tests based on review results',
            inputSchema: {
                type: 'object',
                properties: {
                    sessionId: { type: 'string', description: 'Session ID' },
                    testCode: { type: 'string', description: 'The test code to approve/reject' },
                    reviewResult: {
                        type: 'object',
                        description: 'Previous review result from review_generated_tests',
                        properties: {
                            overallScore: { type: 'number' },
                            approved: { type: 'boolean' },
                            feedback: { type: 'string' },
                            // FIX: approveAiTests reads reviewResult.autoFixSuggestions for the
                            // 'request_fixes' action, but the schema never declared it, so
                            // schema-conformant clients could not supply it.
                            autoFixSuggestions: { type: 'array', items: { type: 'string' } }
                        }
                    },
                    action: {
                        type: 'string',
                        enum: ['approve', 'reject', 'request_fixes', 'escalate_to_human'],
                        description: 'Action to take'
                    },
                    trustLevel: {
                        type: 'string',
                        enum: ['learning', 'ai_assisted', 'batch_review', 'full_auto'],
                        description: 'Current trust level'
                    }
                },
                required: ['sessionId', 'testCode', 'action']
            }
        }
    ];

    constructor() {
        super();
        this.testGenerator = new TestGeneratorAI();
        this.testReviewer = new TestReviewerAI();
    }

    /**
     * Dispatch an MCP tool invocation to the matching handler method.
     *
     * @param {string} toolName - One of the tool names declared in `tools`.
     * @param {object} args - Tool arguments matching the tool's inputSchema.
     * @param {*} sessions - Debug-session registry (currently unused here;
     *     kept for interface compatibility with BaseToolHandler callers).
     * @returns {Promise<object>} Tool result, or an error response on failure.
     */
    async handle(toolName, args, sessions) {
        try {
            switch (toolName) {
                case 'start_ai_test_recording':
                    return await this.startAiTestRecording(args);
                case 'stop_ai_test_recording':
                    return await this.stopAiTestRecording(args);
                case 'generate_tests_from_session':
                    return await this.generateTestsFromSession(args);
                case 'review_generated_tests':
                    return await this.reviewGeneratedTests(args);
                case 'approve_ai_tests':
                    return await this.approveAiTests(args);
                default:
                    throw new Error(`Unknown tool: ${toolName}`);
            }
        }
        catch (error) {
            return this.createErrorResponse(error);
        }
    }

    /**
     * Start an AI test recording for a debug session.
     *
     * @param {{sessionId: string, testIntent: string, userStory?: string,
     *          tags?: string[], trustLevel?: string}} args
     * @returns {Promise<object>} Success payload with session metadata and the
     *     recorder's internal recording ID.
     * @throws {Error} If required args are missing or a recording is already
     *     active for the session.
     */
    async startAiTestRecording(args) {
        const { sessionId, testIntent, userStory, tags, trustLevel = 'ai_assisted' } = args;
        // Validate required parameters
        if (!sessionId) {
            throw new Error('sessionId is required');
        }
        if (!testIntent) {
            throw new Error('testIntent is required');
        }
        // Check if recording already exists
        if (this.activeRecorders.has(sessionId)) {
            throw new Error(`Recording already active for session ${sessionId}`);
        }
        // For now, we'll simulate page creation - in real implementation,
        // this would get the page from the existing debug session
        const mockPage = {
            on: () => { },
            url: () => 'http://localhost:8200',
            viewportSize: () => ({ width: 1920, height: 1080 }),
            mainFrame: () => ({ url: () => 'http://localhost:8200' })
        };
        const recorder = new DebugSessionRecorder(mockPage, {
            bufferSize: 1000,
            privacy: {
                redactSensitiveData: true,
                sensitiveFields: ['password', 'token', 'key', 'secret']
            }
        });
        const metadata = { testIntent, userStory, tags };
        await recorder.startRecording(metadata);
        this.activeRecorders.set(sessionId, recorder);
        return {
            success: true,
            sessionId: sessionId, // Use the provided sessionId
            message: `AI test recording started for session ${sessionId}`,
            metadata,
            trustLevel,
            recordingId: recorder.sessionId // Keep the recorder's internal ID separate
        };
    }

    /**
     * Stop an active recording and (by default) generate a test from it.
     *
     * Test-generation failures are reported in the result rather than thrown,
     * so a successful stop is never masked by a downstream generation error.
     *
     * @param {{sessionId: string, generateTests?: boolean, framework?: string}} args
     * @returns {Promise<object>} Session summary, plus `generatedTest` or
     *     `testGenerationError` when generation was requested.
     * @throws {Error} If no active recording exists for the session.
     */
    async stopAiTestRecording(args) {
        const { sessionId, generateTests = true, framework = 'playwright' } = args;
        const recorder = this.activeRecorders.get(sessionId);
        if (!recorder) {
            throw new Error(`No active recording found for session ${sessionId}`);
        }
        const session = await recorder.stopRecording();
        this.activeRecorders.delete(sessionId);
        const result = {
            success: true,
            sessionId,
            session: {
                id: session.id,
                duration: session.duration,
                eventCount: session.events.length,
                url: session.url,
                metadata: session.metadata
            },
            message: `Recording stopped for session ${sessionId}`
        };
        if (generateTests) {
            try {
                const testCode = await this.testGenerator.generateTest(session);
                const review = await this.testReviewer.reviewTest(testCode, framework);
                result.generatedTest = {
                    code: testCode,
                    framework,
                    review,
                    approved: review.approved,
                    autoFixSuggestions: review.autoFixSuggestions || []
                };
                result.message += `. Generated ${framework} test with quality score: ${review.overallScore.toFixed(2)}`;
            }
            catch (error) {
                result.testGenerationError = error instanceof Error ? error.message : 'Unknown error';
                result.message += `, but test generation failed`;
            }
        }
        return result;
    }

    /**
     * Generate test code from a session without stopping its recording.
     *
     * Only sessions with an active recorder are supported; loading completed
     * sessions from storage is not implemented yet.
     *
     * @param {{sessionId: string, framework?: string,
     *          includeVisualAssertions?: boolean}} args
     * @returns {Promise<object>} Generated test code, AI review, additional
     *     test suggestions, and detected critical paths.
     * @throws {Error} If the session has no active recorder.
     */
    async generateTestsFromSession(args) {
        const { sessionId, framework = 'playwright', includeVisualAssertions = false } = args;
        // Try to get session from active recorder first
        let session;
        const recorder = this.activeRecorders.get(sessionId);
        if (recorder) {
            // Get current session state without stopping recording
            const events = recorder.getEvents();
            session = {
                id: recorder.sessionId,
                timestamp: new Date(recorder.startTime),
                duration: Date.now() - recorder.startTime,
                url: 'http://localhost:8200', // Would get from page in real implementation
                events,
                metadata: { testIntent: 'Active session' }
            };
        }
        else {
            // In real implementation, would load session from storage
            throw new Error(`Session ${sessionId} not found. Ensure the session is active or has been recorded.`);
        }
        const generator = new TestGeneratorAI({ framework, includeVisualAssertions });
        const testCode = await generator.generateTest(session);
        const review = await this.testReviewer.reviewTest(testCode, framework);
        // Generate additional test suggestions
        const additionalTests = generator.suggestAdditionalTests(session);
        const criticalPaths = generator.detectCriticalPaths(session.events);
        return {
            success: true,
            sessionId,
            testCode,
            framework,
            review: {
                overallScore: review.overallScore,
                approved: review.approved,
                feedback: review.feedback,
                criteria: review.criteria,
                autoFixSuggestions: review.autoFixSuggestions,
                escalateToHuman: review.escalateToHuman
            },
            additionalSuggestions: additionalTests,
            criticalPaths,
            session: {
                eventCount: session.events.length,
                duration: session.duration,
                metadata: session.metadata
            }
        };
    }

    /**
     * Run an AI quality review over supplied test code.
     *
     * A fresh TestReviewerAI is built per call so the caller's
     * qualityThreshold applies without mutating the shared reviewer.
     *
     * @param {{testCode: string, framework: string, sessionId?: string,
     *          qualityThreshold?: number}} args
     * @returns {Promise<object>} Review scores, per-criterion breakdown,
     *     auto-fix suggestions, and human-readable recommendations.
     */
    async reviewGeneratedTests(args) {
        const { testCode, framework, sessionId, qualityThreshold = 0.85 } = args;
        const reviewer = new TestReviewerAI({ qualityThreshold });
        const review = await reviewer.reviewTest(testCode, framework);
        return {
            success: true,
            sessionId,
            review: {
                overallScore: review.overallScore,
                approved: review.approved,
                feedback: review.feedback,
                criteria: {
                    clarity: review.criteria.clarity,
                    robustness: review.criteria.robustness,
                    maintainability: review.criteria.maintainability,
                    security: review.criteria.security
                },
                autoFixSuggestions: review.autoFixSuggestions || [],
                escalateToHuman: review.escalateToHuman,
                timestamp: review.timestamp
            },
            recommendations: this.generateRecommendations(review),
            qualityThreshold
        };
    }

    /**
     * Record an approval decision for a reviewed test.
     *
     * @param {{sessionId: string, testCode: string, reviewResult?: object,
     *          action: string, trustLevel?: string}} args
     * @returns {Promise<object>} Decision record with approval flag and
     *     suggested next steps for the chosen action.
     * @throws {Error} On an unrecognized action value.
     */
    async approveAiTests(args) {
        const { sessionId, testCode, reviewResult, action, trustLevel } = args;
        const result = {
            success: true,
            sessionId,
            action,
            trustLevel,
            timestamp: new Date()
        };
        switch (action) {
            case 'approve':
                result.message = 'Test approved for production use';
                result.approved = true;
                result.nextSteps = [
                    'Save test to appropriate test directory',
                    'Add to CI/CD pipeline',
                    'Update test documentation'
                ];
                break;
            case 'reject':
                result.message = 'Test rejected - requires significant improvements';
                result.approved = false;
                result.nextSteps = [
                    'Address feedback from review',
                    'Regenerate test with improvements',
                    'Consider manual test writing for complex scenarios'
                ];
                break;
            case 'request_fixes':
                result.message = 'Test needs fixes but shows promise';
                result.approved = false;
                result.nextSteps = [
                    'Apply auto-fix suggestions if available',
                    'Address specific feedback points',
                    'Re-submit for review'
                ];
                if (reviewResult?.autoFixSuggestions) {
                    result.suggestedFixes = reviewResult.autoFixSuggestions;
                }
                break;
            case 'escalate_to_human':
                result.message = 'Test escalated for human review';
                result.approved = false;
                result.escalated = true;
                result.nextSteps = [
                    'Add to human review queue',
                    'Provide context and original session data',
                    'Wait for human decision'
                ];
                break;
            default:
                throw new Error(`Unknown action: ${action}`);
        }
        // Update trust level based on approval patterns (in real implementation)
        if (action === 'approve' && trustLevel === 'ai_assisted') {
            result.trustLevelRecommendation = 'Consider upgrading to batch_review after 10+ successful approvals';
        }
        return result;
    }

    /**
     * Derive human-readable recommendations from a review result.
     *
     * @param {object} review - Review object with overallScore, criteria, and
     *     escalateToHuman fields.
     * @returns {string[]} Zero or more recommendation strings.
     */
    generateRecommendations(review) {
        const recommendations = [];
        if (review.overallScore < 0.6) {
            recommendations.push('Consider regenerating the test with clearer session recording');
            recommendations.push('Ensure the debugging session captured the complete user flow');
        }
        if (!review.criteria.robustness.avoidsBrittleSelectors) {
            recommendations.push('Update application to use data-testid attributes for better test stability');
        }
        if (!review.criteria.security.noHardcodedSecrets) {
            recommendations.push('Review and sanitize any sensitive data in test fixtures');
        }
        if (review.escalateToHuman) {
            recommendations.push('This test complexity requires human oversight before production use');
        }
        if (review.overallScore >= 0.9) {
            recommendations.push('Excellent test quality! Consider this as a template for similar scenarios');
        }
        return recommendations;
    }

    // Utility method to get recording status
    getActiveRecordings() {
        return Array.from(this.activeRecorders.keys());
    }

    // Utility method to get recorder by session ID
    getRecorder(sessionId) {
        return this.activeRecorders.get(sessionId);
    }
}
//# sourceMappingURL=test-generation-handler.js.map