@versatil/sdlc-framework

🚀 AI-Native SDLC framework with 11-MCP ecosystem, RAG memory, OPERA orchestration, and 6 specialized agents achieving ZERO CONTEXT LOSS. Features complete CI/CD pipeline with 7 GitHub workflows (MCP testing, security scanning, performance benchmarking)…

554 lines (484 loc) • 19.8 kB
/**
 * Autonomous Test Generator - Real-World Scenario Creator
 *
 * Generates comprehensive test suites that validate actual functionality
 * rather than just architectural correctness. Designed to expose vapor-ware.
 */

import * as fs from 'fs/promises';
import * as path from 'path';
import { VERSATILLogger } from '../utils/logger.js';
import { FrameworkPromise, TestableScenario } from './feature-mapper.js';
import { SimulationScenario, TestCase } from '../agents/simulation-qa.js';

export interface GeneratedTestSuite {
  id: string;
  name: string;
  description: string;
  category: string;
  scenarios: SimulationScenario[];
  estimatedDuration: number; // milliseconds
  priority: 'critical' | 'high' | 'medium' | 'low';
  prerequisites: string[];
}

export interface TestGenerationConfig {
  includeEdgeCases: boolean;
  includePerformanceTests: boolean;
  includeFailureScenarios: boolean;
  maxTestDuration: number; // milliseconds
  realWorldSimulation: boolean;
}

export class TestGenerator {
  private logger: VERSATILLogger;
  private projectRoot: string;
  private testIdCounter: number = 1;

  constructor(projectRoot: string = process.cwd()) {
    this.logger = VERSATILLogger.getInstance();
    this.projectRoot = projectRoot;
  }

  /**
   * Generate comprehensive test suite from framework promises
   */
  async generateTestSuite(
    promises: FrameworkPromise[],
    config: TestGenerationConfig = this.getDefaultConfig()
  ): Promise<GeneratedTestSuite[]> {
    this.logger.info('🧪 Generating autonomous test suite', {
      promisesToTest: promises.length,
      includeEdgeCases: config.includeEdgeCases,
      realWorldSimulation: config.realWorldSimulation
    }, 'TestGenerator');

    const testSuites: GeneratedTestSuite[] = [];

    // Group promises by category for organized testing
    const categorizedPromises = this.categorizePromises(promises);

    for (const [category, categoryPromises] of Object.entries(categorizedPromises)) {
      const suite = await this.generateCategoryTestSuite(category, categoryPromises, config);
      testSuites.push(suite);
    }

    // Generate integration test suite
    const integrationSuite = await this.generateIntegrationTestSuite(promises, config);
    testSuites.push(integrationSuite);

    // Generate stress test suite
    const stressSuite = await this.generateStressTestSuite(promises, config);
    testSuites.push(stressSuite);

    this.logger.info('✅ Test suite generation complete', {
      totalSuites: testSuites.length,
      totalScenarios: testSuites.reduce((sum, suite) => sum + suite.scenarios.length, 0),
      estimatedTotalTime: `${Math.round(testSuites.reduce((sum, suite) => sum + suite.estimatedDuration, 0) / 1000)}s`
    }, 'TestGenerator');

    return testSuites;
  }

  /**
   * Generate test suite for a specific category
   */
  private async generateCategoryTestSuite(
    category: string,
    promises: FrameworkPromise[],
    config: TestGenerationConfig
  ): Promise<GeneratedTestSuite> {
    const scenarios: SimulationScenario[] = [];

    for (const promise of promises) {
      // Generate basic functionality tests
      const basicScenarios = await this.generateBasicFunctionalityTests(promise);
      scenarios.push(...basicScenarios);

      // Generate edge case tests if enabled
      if (config.includeEdgeCases) {
        const edgeCaseScenarios = await this.generateEdgeCaseTests(promise);
        scenarios.push(...edgeCaseScenarios);
      }

      // Generate performance tests if enabled
      if (config.includePerformanceTests) {
        const performanceScenarios = await this.generatePerformanceTests(promise);
        scenarios.push(...performanceScenarios);
      }

      // Generate failure scenarios if enabled
      if (config.includeFailureScenarios) {
        const failureScenarios = await this.generateFailureTests(promise);
        scenarios.push(...failureScenarios);
      }
    }

    return {
      id: `test-suite-${category}`,
      name: `${category.toUpperCase()} Test Suite`,
      description: `Comprehensive validation of ${category} functionality`,
      category,
      scenarios,
      estimatedDuration: this.estimateTestDuration(scenarios),
      priority: this.determinePriority(category),
      prerequisites: this.determinePrerequisites(category)
    };
  }

  /**
   * Generate basic functionality tests for a promise
   */
  private async generateBasicFunctionalityTests(promise: FrameworkPromise): Promise<SimulationScenario[]> {
    const scenarios: SimulationScenario[] = [];

    for (const testableScenario of promise.testableScenarios) {
      const scenario: SimulationScenario = {
        id: `${promise.id}-basic-${this.testIdCounter++}`,
        name: `Basic Test for ${promise.featureName}`,
        featureName: promise.featureName,
        promise: promise.description,
        testCases: await this.generateTestCasesFromScenario(testableScenario, 'basic'),
        expectedBehavior: testableScenario.expectedBehavior,
        status: 'not_tested',
        evidence: [],
        confidence: 0.8
      };
      scenarios.push(scenario);
    }

    return scenarios;
  }

  /**
   * Generate edge case tests for a promise
   */
  private async generateEdgeCaseTests(promise: FrameworkPromise): Promise<SimulationScenario[]> {
    const scenarios: SimulationScenario[] = [];

    if (promise.category === 'agent-activation') {
      // Test agent activation edge cases
      scenarios.push({
        id: `${promise.id}-edge-concurrent-${this.testIdCounter++}`,
        name: `${promise.featureName} - Concurrent Activation`,
        promise: 'Agent should handle multiple simultaneous activation requests',
        testCases: [{
          id: `concurrent-activation-${this.testIdCounter}`,
          name: `concurrent-activation-${this.testIdCounter}`,
          description: 'Trigger same agent multiple times simultaneously',
          action: 'Edit multiple files that should trigger the same agent concurrently',
          expectedResult: 'Agent should handle concurrent requests gracefully without conflicts',
          passed: false
        }],
        expectedBehavior: 'Agent processes concurrent requests without errors or conflicts',
        status: 'not_tested',
        evidence: [],
        confidence: 0.7
      });

      scenarios.push({
        id: `${promise.id}-edge-invalid-context-${this.testIdCounter++}`,
        name: `${promise.featureName} - Invalid Context`,
        promise: 'Agent should handle invalid or corrupted context gracefully',
        testCases: [{
          id: `invalid-context-${this.testIdCounter}`,
          name: `invalid-context-${this.testIdCounter}`,
          description: 'Trigger agent with malformed or missing context data',
          action: 'Simulate agent activation with corrupted context information',
          expectedResult: 'Agent should provide meaningful error or fallback response',
          passed: false
        }],
        expectedBehavior: 'Agent gracefully handles invalid context without crashing',
        status: 'not_tested',
        evidence: [],
        confidence: 0.6
      });
    }

    return scenarios;
  }

  /**
   * Generate performance tests for a promise
   */
  private async generatePerformanceTests(promise: FrameworkPromise): Promise<SimulationScenario[]> {
    const scenarios: SimulationScenario[] = [];

    // General performance test for all promises
    scenarios.push({
      id: `${promise.id}-perf-response-time-${this.testIdCounter++}`,
      name: `${promise.featureName} - Response Time`,
      promise: 'Feature should respond within acceptable time limits',
      testCases: [{
        id: `response-time-${this.testIdCounter}`,
        name: `response-time-${this.testIdCounter}`,
        description: 'Measure feature response time under normal conditions',
        action: 'Trigger feature and measure response time',
        expectedResult: 'Response time should be under 5 seconds for normal operations',
        passed: false
      }],
      expectedBehavior: 'Feature responds quickly without performance degradation',
      status: 'not_tested',
      evidence: [],
      confidence: 0.9
    });

    if (promise.category === 'agent-activation') {
      // Agent-specific performance tests
      scenarios.push({
        id: `${promise.id}-perf-activation-time-${this.testIdCounter++}`,
        name: `${promise.featureName} - Activation Performance`,
        promise: 'Agent should activate quickly when triggered',
        testCases: [{
          id: `activation-performance-${this.testIdCounter}`,
          name: `activation-performance-${this.testIdCounter}`,
          description: 'Measure time from file edit to agent response',
          action: 'Edit triggering file and measure time to agent activation',
          expectedResult: 'Agent should activate within 2 seconds of file edit',
          passed: false
        }],
        expectedBehavior: 'Agent activation is fast and responsive',
        status: 'not_tested',
        evidence: [],
        confidence: 0.8
      });
    }

    return scenarios;
  }

  /**
   * Generate failure scenario tests
   */
  private async generateFailureTests(promise: FrameworkPromise): Promise<SimulationScenario[]> {
    const scenarios: SimulationScenario[] = [];

    scenarios.push({
      id: `${promise.id}-failure-network-${this.testIdCounter++}`,
      name: `${promise.featureName} - Network Failure`,
      promise: 'Feature should handle network failures gracefully',
      testCases: [{
        id: `network-failure-${this.testIdCounter}`,
        name: `network-failure-${this.testIdCounter}`,
        description: 'Test feature behavior when network is unavailable',
        action: 'Trigger feature while simulating network connectivity issues',
        expectedResult: 'Feature should provide meaningful error message and graceful degradation',
        passed: false
      }],
      expectedBehavior: 'Feature handles network failures without crashing',
      status: 'not_tested',
      evidence: [],
      confidence: 0.6
    });

    return scenarios;
  }

  /**
   * Generate integration test suite
   */
  private async generateIntegrationTestSuite(
    promises: FrameworkPromise[],
    config: TestGenerationConfig
  ): Promise<GeneratedTestSuite> {
    const scenarios: SimulationScenario[] = [];

    // Agent-to-Agent Integration Test
    scenarios.push({
      id: `integration-agent-handoff-${this.testIdCounter++}`,
      name: 'Agent-to-Agent Handoff Integration',
      featureName: 'Agent-to-Agent Handoff Integration',
      promise: 'Agents should seamlessly hand off context to each other',
      testCases: [{
        id: `agent-handoff-integration-${this.testIdCounter}`,
        name: `agent-handoff-integration-${this.testIdCounter}`,
        description: 'Test complete workflow from James-Frontend to Marcus-Backend to Maria-QA',
        action: 'Create React component that requires API integration and testing',
        expectedResult: 'All three agents should activate in sequence with preserved context',
        passed: false
      }],
      expectedBehavior: 'Multi-agent workflow completes with zero context loss',
      status: 'not_tested',
      evidence: [],
      confidence: 0.9
    });

    // MCP Integration Test
    scenarios.push({
      id: `integration-mcp-agent-${this.testIdCounter++}`,
      name: 'MCP-Agent Integration',
      featureName: 'MCP-Agent Integration',
      promise: 'MCP tools should successfully communicate with agents',
      testCases: [{
        id: `mcp-agent-communication-${this.testIdCounter}`,
        name: `mcp-agent-communication-${this.testIdCounter}`,
        description: 'Test MCP tool triggering agent activation and receiving responses',
        action: 'Use MCP versatil_activate_agent tool to activate James-Frontend',
        expectedResult: 'Agent should activate via MCP and provide structured response',
        passed: false
      }],
      expectedBehavior: 'MCP tools and agents communicate bidirectionally',
      status: 'not_tested',
      evidence: [],
      confidence: 0.8
    });

    // Real Project Integration Test
    scenarios.push({
      id: `integration-verssai-project-${this.testIdCounter++}`,
      name: 'VERSSAI Project Integration',
      featureName: 'VERSSAI Project Integration',
      promise: 'Framework should work seamlessly with VERSSAI project',
      testCases: [{
        id: `verssai-integration-${this.testIdCounter}`,
        name: `verssai-integration-${this.testIdCounter}`,
        description: 'Test framework functionality with actual VERSSAI project files',
        action: 'Edit VERSSAI React components and verify agent responses',
        expectedResult: 'Agents should provide contextual suggestions for VERSSAI codebase',
        passed: false
      }],
      expectedBehavior: 'Framework provides real value in production project',
      status: 'not_tested',
      evidence: [],
      confidence: 0.9
    });

    return {
      id: 'integration-test-suite',
      name: 'Integration Test Suite',
      description: 'End-to-end integration validation of framework components',
      category: 'integration',
      scenarios,
      estimatedDuration: this.estimateTestDuration(scenarios),
      priority: 'critical',
      prerequisites: ['All individual component tests must pass']
    };
  }

  /**
   * Generate stress test suite
   */
  private async generateStressTestSuite(
    promises: FrameworkPromise[],
    config: TestGenerationConfig
  ): Promise<GeneratedTestSuite> {
    const scenarios: SimulationScenario[] = [];

    // High-frequency activation test
    scenarios.push({
      id: `stress-high-frequency-${this.testIdCounter++}`,
      name: 'High-Frequency Agent Activation',
      featureName: 'High-Frequency Agent Activation',
      promise: 'Framework should handle rapid successive agent activations',
      testCases: [{
        id: `high-frequency-stress-${this.testIdCounter}`,
        name: `high-frequency-stress-${this.testIdCounter}`,
        description: 'Rapidly trigger multiple agents in quick succession',
        action: 'Edit multiple files rapidly to trigger different agents',
        expectedResult: 'All agents should respond without performance degradation',
        passed: false
      }],
      expectedBehavior: 'Framework maintains performance under high load',
      status: 'not_tested',
      evidence: [],
      confidence: 0.7
    });

    // Memory usage test
    scenarios.push({
      id: `stress-memory-usage-${this.testIdCounter++}`,
      name: 'Memory Usage Under Load',
      featureName: 'Memory Usage Under Load',
      promise: 'Framework should maintain reasonable memory usage',
      testCases: [{
        id: `memory-stress-${this.testIdCounter}`,
        name: `memory-stress-${this.testIdCounter}`,
        description: 'Monitor memory usage during extended framework operation',
        action: 'Run framework continuously with periodic agent activations',
        expectedResult: 'Memory usage should remain stable without leaks',
        passed: false
      }],
      expectedBehavior: 'Framework has stable memory usage patterns',
      status: 'not_tested',
      evidence: [],
      confidence: 0.8
    });

    return {
      id: 'stress-test-suite',
      name: 'Stress Test Suite',
      description: 'Framework performance and stability under load',
      category: 'stress',
      scenarios,
      estimatedDuration: this.estimateTestDuration(scenarios) * 2, // Stress tests take longer
      priority: 'medium',
      prerequisites: ['Basic functionality tests must pass']
    };
  }

  /**
   * Generate test cases from a testable scenario
   */
  private async generateTestCasesFromScenario(
    scenario: TestableScenario,
    type: 'basic' | 'edge' | 'performance' | 'failure'
  ): Promise<TestCase[]> {
    const testCases: TestCase[] = [];

    // Primary test case
    testCases.push({
      id: `${scenario.id}-${type}-primary`,
      name: `${scenario.id}-${type}-primary`,
      description: scenario.userAction,
      expectedResult: scenario.expectedBehavior,
      passed: false
    });

    // Add measurable criteria as individual test cases
    scenario.measurableCriteria.forEach((criteria, index) => {
      testCases.push({
        id: `${scenario.id}-${type}-criteria-${index}`,
        name: `${scenario.id}-${type}-criteria-${index}`,
        description: `Validate that ${criteria.toLowerCase()}`,
        expectedResult: `Criteria "${criteria}" is met`,
        passed: false
      });
    });

    return testCases;
  }

  /**
   * Categorize promises by their category
   */
  private categorizePromises(promises: FrameworkPromise[]): Record<string, FrameworkPromise[]> {
    const categorized: Record<string, FrameworkPromise[]> = {};

    promises.forEach(promise => {
      if (!categorized[promise.category]) {
        categorized[promise.category] = [];
      }
      categorized[promise.category]!.push(promise);
    });

    return categorized;
  }

  /**
   * Estimate test duration for scenarios
   */
  private estimateTestDuration(scenarios: SimulationScenario[]): number {
    return scenarios.reduce((total, scenario) => {
      const testCaseTime = scenario.testCases.length * 2000; // 2 seconds per test case
      return total + testCaseTime;
    }, 0);
  }

  /**
   * Determine priority based on category
   */
  private determinePriority(category: string): 'critical' | 'high' | 'medium' | 'low' {
    const priorityMap: Record<string, 'critical' | 'high' | 'medium' | 'low'> = {
      'agent-activation': 'critical',
      'mcp-integration': 'critical',
      'opera-methodology': 'high',
      'context-preservation': 'high',
      'quality-gates': 'high',
      'testing-integration': 'medium'
    };

    return priorityMap[category] || 'medium';
  }

  /**
   * Determine prerequisites for test category
   */
  private determinePrerequisites(category: string): string[] {
    const prerequisiteMap: Record<string, string[]> = {
      'agent-activation': ['Framework installed', 'Agent registry initialized'],
      'mcp-integration': ['MCP server running', 'Agent system operational'],
      'opera-methodology': ['All agents functional', 'Context system operational'],
      'context-preservation': ['Agent activation working', 'Memory system functional'],
      'quality-gates': ['Maria-QA agent operational', 'Testing infrastructure ready'],
      'testing-integration': ['Jest configured', 'Playwright available']
    };

    return prerequisiteMap[category] || ['Basic framework functionality'];
  }

  /**
   * Get default test generation configuration
   */
  private getDefaultConfig(): TestGenerationConfig {
    return {
      includeEdgeCases: true,
      includePerformanceTests: true,
      includeFailureScenarios: true,
      maxTestDuration: 300000, // 5 minutes max
      realWorldSimulation: true
    };
  }

  /**
   * Export generated test suites to file
   */
  async exportTestSuites(testSuites: GeneratedTestSuite[], filePath?: string): Promise<string> {
    const exportPath = filePath || path.join(this.projectRoot, '.versatil', 'generated-test-suites.json');

    const exportData = {
      timestamp: new Date().toISOString(),
      generator: 'VERSATIL TestGenerator',
      totalSuites: testSuites.length,
      totalScenarios: testSuites.reduce((sum, suite) => sum + suite.scenarios.length, 0),
      estimatedTotalDuration: testSuites.reduce((sum, suite) => sum + suite.estimatedDuration, 0),
      testSuites
    };

    // Ensure directory exists
    await fs.mkdir(path.dirname(exportPath), { recursive: true });
    await fs.writeFile(exportPath, JSON.stringify(exportData, null, 2));

    return exportPath;
  }
}
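A minimal usage sketch, assuming the exports above and a feature mapper that supplies FrameworkPromise objects; the relative import path and the empty promise list are placeholders for illustration:

// Usage sketch (assumed file path; promises would normally come from the feature mapper).
import { TestGenerator } from './test-generator.js';
import { FrameworkPromise } from './feature-mapper.js';

async function runGeneration(): Promise<void> {
  const generator = new TestGenerator(process.cwd());

  // Placeholder: in practice this array is produced by the feature-mapper module.
  const promises: FrameworkPromise[] = [];

  // Generate suites with an explicit config instead of the defaults.
  const suites = await generator.generateTestSuite(promises, {
    includeEdgeCases: true,
    includePerformanceTests: true,
    includeFailureScenarios: false,
    maxTestDuration: 300000,
    realWorldSimulation: true
  });

  // Write the suites to .versatil/generated-test-suites.json and report the path.
  const exportPath = await generator.exportTestSuites(suites);
  console.log(`Exported ${suites.length} suites to ${exportPath}`);
}

runGeneration().catch(console.error);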