// agentic-qe: Agentic Quality Engineering Fleet System - AI-driven quality management platform
"use strict"; /** * PerformanceTesterAgent - Load testing and bottleneck detection specialist * * Implements load testing orchestration (JMeter, K6, Gatling, Artillery), * bottleneck detection, resource monitoring, SLA validation, performance * regression detection, and load pattern generation. * * Based on SPARC methodology and AQE Fleet specification */ Object.defineProperty(exports, "__esModule", { value: true }); exports.PerformanceTesterAgent = void 0; const BaseAgent_1 = require("./BaseAgent"); const types_1 = require("../types"); // ============================================================================ // Performance Tester Agent Implementation // ============================================================================ class PerformanceTesterAgent extends BaseAgent_1.BaseAgent { constructor(config) { const defaultThresholds = { maxLatencyP95: 500, maxLatencyP99: 1000, minThroughput: 1000, maxErrorRate: 0.01, maxCpuUsage: 80, maxMemoryUsage: 85 }; super({ ...config, type: types_1.QEAgentType.PERFORMANCE_TESTER, capabilities: [ { name: 'load-testing-orchestration', version: '1.0.0', description: 'Execute load tests using multiple tools (K6, JMeter, Gatling, Artillery)', parameters: { supportedTools: ['k6', 'jmeter', 'gatling', 'artillery'], maxVirtualUsers: 10000, maxDuration: 7200 // 2 hours } }, { name: 'bottleneck-detection', version: '1.0.0', description: 'Identify performance bottlenecks in CPU, memory, I/O, database, and network', parameters: { analysisTypes: ['cpu', 'memory', 'io', 'database', 'network'], detectionAlgorithm: 'statistical-analysis' } }, { name: 'resource-monitoring', version: '1.0.0', description: 'Real-time resource monitoring during load tests', parameters: { metrics: ['cpu', 'memory', 'disk', 'network', 'connections'], samplingInterval: 1000 // 1 second } }, { name: 'sla-validation', version: '1.0.0', description: 'Validate performance against SLA thresholds', parameters: { thresholds: config.thresholds || defaultThresholds } }, { name: 'performance-regression-detection', version: '1.0.0', description: 'Detect performance regressions by comparing against baselines', parameters: { comparisonStrategy: 'statistical', regressionThreshold: 0.1 // 10% degradation } }, { name: 'load-pattern-generation', version: '1.0.0', description: 'Generate realistic load patterns (spike, ramp-up, stress, soak)', parameters: { patterns: ['constant', 'ramp-up', 'spike', 'stress', 'soak'] } } ] }); this.activeTests = new Map(); this.baselines = new Map(); this.config = { ...config, tools: { loadTesting: 'k6', monitoring: ['prometheus'], ...config.tools }, thresholds: { ...defaultThresholds, ...config.thresholds }, loadProfile: { virtualUsers: 100, duration: 300, rampUpTime: 30, pattern: 'ramp-up', ...config.loadProfile } }; } // ============================================================================ // BaseAgent Implementation // ============================================================================ async initializeComponents() { console.log(`PerformanceTesterAgent ${this.agentId.id} initializing...`); // Initialize load testing client await this.initializeLoadTestingClient(); // Initialize monitoring client await this.initializeMonitoringClient(); // Load historical baselines await this.loadBaselines(); // Register event handlers this.registerPerformanceEventHandlers(); console.log(`PerformanceTesterAgent ${this.agentId.id} initialized successfully`); } async performTask(task) { const { type, payload } = task; console.log(`PerformanceTesterAgent executing 
  // ============================================================================
  // BaseAgent Implementation
  // ============================================================================
  async initializeComponents() {
    console.log(`PerformanceTesterAgent ${this.agentId.id} initializing...`);
    // Initialize load testing client
    await this.initializeLoadTestingClient();
    // Initialize monitoring client
    await this.initializeMonitoringClient();
    // Load historical baselines
    await this.loadBaselines();
    // Register event handlers
    this.registerPerformanceEventHandlers();
    console.log(`PerformanceTesterAgent ${this.agentId.id} initialized successfully`);
  }
  async performTask(task) {
    const { type, payload } = task;
    console.log(`PerformanceTesterAgent executing ${type} task: ${task.id}`);
    switch (type) {
      case 'run-load-test':
        return await this.runLoadTest(payload);
      case 'detect-bottlenecks':
        return await this.detectBottlenecks(payload);
      case 'validate-sla':
        return await this.validateSLA(payload);
      case 'detect-regressions':
        return await this.detectRegressions(payload);
      case 'generate-load-pattern':
        return await this.generateLoadPattern(payload);
      case 'establish-baseline':
        return await this.establishBaseline(payload);
      case 'analyze-performance':
        return await this.analyzePerformance(payload);
      default:
        throw new Error(`Unsupported task type: ${type}`);
    }
  }
  async loadKnowledge() {
    // Load performance testing knowledge from memory
    const storedKnowledge = await this.retrieveMemory('performance-knowledge');
    if (storedKnowledge) {
      console.log('Loaded performance testing knowledge from memory');
    }
    // Load test results history
    const resultsHistory = await this.memoryStore.retrieve('aqe/performance/results/history');
    if (resultsHistory) {
      console.log('Loaded test results history from memory');
    }
  }
  async cleanup() {
    // Save current state
    await this.storeMemory('performance-state', {
      activeTests: Array.from(this.activeTests.entries()),
      baselines: Array.from(this.baselines.entries()),
      timestamp: new Date()
    });
    // Cleanup load testing client
    if (this.loadTestingClient) {
      console.log('Cleaning up load testing client');
      // In real implementation, close connections
      this.loadTestingClient = undefined;
    }
    // Cleanup monitoring client
    if (this.monitoringClient) {
      console.log('Cleaning up monitoring client');
      // In real implementation, close connections
      this.monitoringClient = undefined;
    }
    this.activeTests.clear();
    console.log(`PerformanceTesterAgent ${this.agentId.id} cleaned up`);
  }
  // ============================================================================
  // Load Testing Orchestration
  // ============================================================================
  async runLoadTest(metadata) {
    const testConfig = this.parseTestConfig(metadata);
    const testId = `loadtest-${Date.now()}`;
    console.log(`Starting load test ${testId} with ${testConfig.loadProfile.virtualUsers} VUs`);
    // Emit test started event
    this.emitEvent('performance.test.started', { testId, config: testConfig }, 'medium');
    const startTime = new Date();
    try {
      // Generate load test script
      const script = this.generateLoadTestScript(testConfig);
      // Execute load test
      const rawResults = await this.executeLoadTest(script, testConfig);
      // Collect monitoring data
      const monitoringData = await this.collectMonitoringData(testId);
      // Analyze results
      const metrics = this.analyzeLoadTestResults(rawResults, monitoringData);
      // Detect bottlenecks
      const bottlenecks = await this.analyzeBottlenecks(metrics);
      // Validate SLA
      const slaViolations = this.checkSLAViolations(metrics, testConfig.thresholds);
      // Generate recommendations
      const recommendations = this.generateRecommendations(metrics, bottlenecks, slaViolations);
      const endTime = new Date();
      const result = {
        id: testId,
        startTime,
        endTime,
        duration: endTime.getTime() - startTime.getTime(),
        metrics,
        bottlenecks,
        slaViolations,
        recommendations
      };
      // Store results
      await this.storeTestResults(testId, result);
      // Emit test completed event
      this.emitEvent('performance.test.completed', {
        testId,
        metrics,
        passed: slaViolations.length === 0
      }, slaViolations.length > 0 ? 'high' : 'medium');
      return result;
    }
    catch (error) {
      console.error(`Load test ${testId} failed:`, error);
      this.emitEvent('performance.test.failed', {
        testId,
        error: error instanceof Error ? error.message : String(error)
      }, 'high');
      throw error;
    }
  }
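  // A minimal 'run-load-test' payload sketch. Only targetUrl, loadProfile,
  // thresholds, and monitoring are read by parseTestConfig(); everything else
  // falls back to the agent defaults. The task envelope (id, type, payload)
  // follows performTask() above; the target URL is hypothetical.
  //
  //   await agent.performTask({
  //     id: 'task-001',
  //     type: 'run-load-test',
  //     payload: {
  //       targetUrl: 'https://staging.example.com',
  //       loadProfile: { virtualUsers: 200, duration: 300, rampUpTime: 30, pattern: 'spike' }
  //     }
  //   });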
  generateLoadTestScript(config) {
    const tool = this.config.tools.loadTesting;
    switch (tool) {
      case 'k6':
        return this.generateK6Script(config);
      case 'jmeter':
        return this.generateJMeterScript(config);
      case 'gatling':
        return this.generateGatlingScript(config);
      case 'artillery':
        return this.generateArtilleryScript(config);
      default:
        throw new Error(`Unsupported load testing tool: ${tool}`);
    }
  }
  generateK6Script(config) {
    const { loadProfile, thresholds } = config;
    return `
import http from 'k6/http';
import { check, sleep } from 'k6';
import { Rate, Trend } from 'k6/metrics';

const errorRate = new Rate('errors');
const latency = new Trend('latency');

export let options = {
  stages: ${this.generateStages(loadProfile)},
  thresholds: {
    'http_req_duration': ['p(95)<${thresholds.maxLatencyP95}', 'p(99)<${thresholds.maxLatencyP99}'],
    'http_reqs': ['rate>${thresholds.minThroughput}'],
    'errors': ['rate<${thresholds.maxErrorRate}']
  }
};

export default function() {
  const endpoints = ${JSON.stringify(loadProfile.endpoints || [])};
  // Select endpoint based on distribution
  const endpoint = selectEndpoint(endpoints);
  const response = http.request(
    endpoint.method,
    \`${config.targetUrl}\${endpoint.path}\`,
    endpoint.payload
  );
  check(response, {
    'status is 2xx': (r) => r.status >= 200 && r.status < 300,
    'response time < ${thresholds.maxLatencyP95}ms': (r) => r.timings.duration < ${thresholds.maxLatencyP95}
  });
  errorRate.add(response.status >= 400);
  latency.add(response.timings.duration);
  sleep(Math.random() * 2 + 1);
}

function selectEndpoint(endpoints) {
  if (endpoints.length === 0) {
    return { path: '/', method: 'GET', percentage: 100 };
  }
  const rand = Math.random() * 100;
  let cumulative = 0;
  for (const endpoint of endpoints) {
    cumulative += endpoint.percentage;
    if (rand <= cumulative) {
      return endpoint;
    }
  }
  return endpoints[0];
}
`.trim();
  }
  generateStages(profile) {
    switch (profile.pattern) {
      case 'constant':
        return `[{ duration: '${profile.duration}s', target: ${profile.virtualUsers} }]`;
      case 'ramp-up':
        return `[
          { duration: '${profile.rampUpTime}s', target: ${profile.virtualUsers} },
          { duration: '${profile.duration - profile.rampUpTime}s', target: ${profile.virtualUsers} }
        ]`;
      case 'spike': {
        const spikeVUs = profile.virtualUsers * 3;
        return `[
          { duration: '${profile.rampUpTime}s', target: ${profile.virtualUsers} },
          { duration: '30s', target: ${spikeVUs} },
          { duration: '30s', target: ${profile.virtualUsers} },
          { duration: '${profile.duration - profile.rampUpTime - 60}s', target: ${profile.virtualUsers} }
        ]`;
      }
      case 'stress':
        return `[
          { duration: '${Math.floor(profile.duration / 4)}s', target: ${profile.virtualUsers} },
          { duration: '${Math.floor(profile.duration / 4)}s', target: ${profile.virtualUsers * 2} },
          { duration: '${Math.floor(profile.duration / 4)}s', target: ${profile.virtualUsers * 3} },
          { duration: '${Math.floor(profile.duration / 4)}s', target: ${profile.virtualUsers * 4} }
        ]`;
      case 'soak':
        return `[
          { duration: '${profile.rampUpTime}s', target: ${profile.virtualUsers} },
          { duration: '${profile.duration}s', target: ${profile.virtualUsers} }
        ]`;
      default:
        return `[{ duration: '${profile.duration}s', target: ${profile.virtualUsers} }]`;
    }
  }
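  // Worked example: with the default profile (100 VUs, duration 300s,
  // rampUpTime 30s, pattern 'ramp-up'), generateStages() emits
  //
  //   [
  //     { duration: '30s',  target: 100 },   // ramp from 0 to 100 VUs
  //     { duration: '270s', target: 100 }    // hold 100 VUs for the remainder
  //   ]
  //
  // which k6 interprets as a linear ramp followed by a steady-state plateau.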
  generateJMeterScript(config) {
    // JMeter XML test plan generation
    return `<?xml version="1.0" encoding="UTF-8"?>
<jmeterTestPlan version="1.2">
  <hashTree>
    <TestPlan>
      <elementProp name="TestPlan.user_defined_variables" elementType="Arguments">
        <collectionProp name="Arguments.arguments"/>
      </elementProp>
    </TestPlan>
    <hashTree>
      <ThreadGroup>
        <stringProp name="ThreadGroup.num_threads">${config.loadProfile.virtualUsers}</stringProp>
        <stringProp name="ThreadGroup.ramp_time">${config.loadProfile.rampUpTime}</stringProp>
        <stringProp name="ThreadGroup.duration">${config.loadProfile.duration}</stringProp>
      </ThreadGroup>
    </hashTree>
  </hashTree>
</jmeterTestPlan>`;
  }
  generateGatlingScript(config) {
    // Gatling Scala simulation
    return `
import io.gatling.core.Predef._
import io.gatling.http.Predef._

class LoadTestSimulation extends Simulation {
  val httpProtocol = http
    .baseUrl("${config.targetUrl}")
    .acceptHeader("application/json")

  val scn = scenario("Load Test")
    .exec(http("request")
      .get("/")
      .check(status.is(200))
      .check(responseTimeInMillis.lt(${config.thresholds.maxLatencyP95})))

  setUp(
    scn.inject(rampUsers(${config.loadProfile.virtualUsers}) during (${config.loadProfile.rampUpTime} seconds))
  ).protocols(httpProtocol)
}
`.trim();
  }
  generateArtilleryScript(config) {
    // Artillery configuration (emitted as JSON, which Artillery accepts since JSON is valid YAML)
    return JSON.stringify({
      config: {
        target: config.targetUrl,
        phases: [
          {
            duration: config.loadProfile.rampUpTime,
            arrivalRate: Math.floor(config.loadProfile.virtualUsers / config.loadProfile.rampUpTime),
            rampTo: config.loadProfile.virtualUsers
          }
        ]
      },
      scenarios: [
        {
          flow: [
            { get: { url: '/' } }
          ]
        }
      ]
    }, null, 2);
  }
  async executeLoadTest(script, config) {
    // Simulate load test execution
    // In real implementation, execute the actual tool
    console.log(`Executing load test with ${config.loadProfile.virtualUsers} VUs for ${config.loadProfile.duration}s`);
    // Simulate test duration
    await new Promise(resolve => setTimeout(resolve, 1000));
    // Return simulated results
    return {
      requests: {
        total: config.loadProfile.virtualUsers * Math.floor(config.loadProfile.duration / 2),
        successful: config.loadProfile.virtualUsers * Math.floor(config.loadProfile.duration / 2) * 0.98,
        failed: config.loadProfile.virtualUsers * Math.floor(config.loadProfile.duration / 2) * 0.02
      },
      latencies: Array.from({ length: 1000 }, () => Math.random() * 1000),
      throughput: config.loadProfile.virtualUsers * 2
    };
  }
  async collectMonitoringData(testId) {
    // Simulate monitoring data collection
    // In real implementation, query monitoring platform
    return {
      cpu: {
        samples: Array.from({ length: 100 }, () => 60 + Math.random() * 30),
        average: 75,
        peak: 92
      },
      memory: {
        samples: Array.from({ length: 100 }, () => 4 + Math.random() * 2),
        average: 5.2,
        peak: 6.8
      },
      network: {
        samples: Array.from({ length: 100 }, () => 100 + Math.random() * 50),
        average: 125,
        peak: 180
      }
    };
  }
  analyzeLoadTestResults(rawResults, monitoringData) {
    const latencies = rawResults.latencies.sort((a, b) => a - b);
    return {
      requests: {
        total: rawResults.requests.total,
        successful: rawResults.requests.successful,
        failed: rawResults.requests.failed,
        errorRate: (rawResults.requests.failed / rawResults.requests.total) * 100
      },
      latency: {
        min: latencies[0],
        max: latencies[latencies.length - 1],
        mean: latencies.reduce((a, b) => a + b, 0) / latencies.length,
        p50: latencies[Math.floor(latencies.length * 0.5)],
        p95: latencies[Math.floor(latencies.length * 0.95)],
        p99: latencies[Math.floor(latencies.length * 0.99)]
      },
      throughput: {
        requestsPerSecond: rawResults.throughput,
        bytesPerSecond: rawResults.throughput * 1024
      },
      resources: {
        cpu: {
          current: monitoringData.cpu.samples[monitoringData.cpu.samples.length - 1],
          average: monitoringData.cpu.average,
          peak: monitoringData.cpu.peak,
          unit: '%'
        },
        memory: {
          current: monitoringData.memory.samples[monitoringData.memory.samples.length - 1],
          average: monitoringData.memory.average,
          peak: monitoringData.memory.peak,
          unit: 'GB'
        },
        network: {
          current: monitoringData.network.samples[monitoringData.network.samples.length - 1],
          average: monitoringData.network.average,
          peak: monitoringData.network.peak,
          unit: 'Mbps'
        }
      }
    };
  }
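  // Percentile note: the index-based estimator above assumes a sorted sample.
  // Worked example: with the 1000 simulated latency samples returned by
  // executeLoadTest(), p95 = latencies[Math.floor(1000 * 0.95)] = latencies[950],
  // i.e. the nearest-rank percentile; no interpolation between neighbors is done.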
  // ============================================================================
  // Bottleneck Detection
  // ============================================================================
  async detectBottlenecks(metadata) {
    const { metrics, testId } = metadata;
    console.log(`Detecting bottlenecks for test ${testId}`);
    const bottlenecks = [];
    // Analyze CPU bottlenecks
    if (metrics.resources.cpu.peak > this.config.thresholds.maxCpuUsage) {
      bottlenecks.push({
        type: 'CPU',
        severity: this.calculateSeverity(metrics.resources.cpu.peak, this.config.thresholds.maxCpuUsage),
        location: 'Application Server',
        description: `CPU usage reached ${metrics.resources.cpu.peak}%, exceeding threshold of ${this.config.thresholds.maxCpuUsage}%`,
        metrics: {
          current: metrics.resources.cpu.peak,
          average: metrics.resources.cpu.average,
          threshold: this.config.thresholds.maxCpuUsage
        },
        remediation: [
          'Profile application to identify CPU-intensive operations',
          'Consider horizontal scaling',
          'Optimize algorithms and reduce computational complexity',
          'Implement caching for expensive operations'
        ],
        impactEstimate: 'High - May cause request timeouts and degraded user experience'
      });
    }
    // Analyze memory bottlenecks
    // NOTE: the simulated monitoring data reports memory in GB while maxMemoryUsage
    // defaults to 85 (a percentage); this comparison assumes callers supply
    // consistently scaled values.
    if (metrics.resources.memory.peak > this.config.thresholds.maxMemoryUsage) {
      bottlenecks.push({
        type: 'MEMORY',
        severity: this.calculateSeverity(metrics.resources.memory.peak, this.config.thresholds.maxMemoryUsage),
        location: 'Application Server',
        description: `Memory usage reached ${metrics.resources.memory.peak}GB, exceeding threshold`,
        metrics: {
          current: metrics.resources.memory.peak,
          average: metrics.resources.memory.average,
          threshold: this.config.thresholds.maxMemoryUsage
        },
        remediation: [
          'Check for memory leaks',
          'Implement connection pooling',
          'Optimize data structures',
          'Increase available memory'
        ],
        impactEstimate: 'Critical - May cause OOM errors and application crashes'
      });
    }
    // Analyze latency bottlenecks
    if (metrics.latency.p95 > this.config.thresholds.maxLatencyP95) {
      const latencyType = this.identifyLatencyBottleneck(metrics);
      bottlenecks.push({
        type: latencyType,
        severity: 'HIGH',
        location: 'Request Pipeline',
        description: `P95 latency is ${metrics.latency.p95}ms, exceeding ${this.config.thresholds.maxLatencyP95}ms threshold`,
        metrics: {
          p95: metrics.latency.p95,
          p99: metrics.latency.p99,
          mean: metrics.latency.mean,
          threshold: this.config.thresholds.maxLatencyP95
        },
        remediation: [
          'Optimize database queries',
          'Implement caching layer',
          'Add CDN for static assets',
          'Review external API calls'
        ],
        impactEstimate: 'High - Affects user experience and conversion rates'
      });
    }
    // Store bottleneck analysis
    await this.memoryStore.store(`aqe/performance/bottlenecks/${testId}`, {
      bottlenecks,
      analyzedAt: new Date(),
      testId
    });
    // Emit bottleneck detected events
    for (const bottleneck of bottlenecks) {
      if (bottleneck.severity === 'CRITICAL' || bottleneck.severity === 'HIGH') {
        this.emitEvent('performance.bottleneck.detected', {
          type: bottleneck.type,
          severity: bottleneck.severity,
          metrics: bottleneck.metrics
        }, bottleneck.severity === 'CRITICAL' ? 'critical' : 'high');
      }
    }
    return bottlenecks;
  }
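  // Worked example for calculateSeverity() below: a CPU peak of 92% against the
  // default 80% threshold gives a ratio of 92 / 80 = 1.15, which falls in the
  // (1.1, 1.25] band and is therefore reported as MEDIUM severity.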
  async analyzeBottlenecks(metrics) {
    return await this.detectBottlenecks({ metrics, testId: 'current' });
  }
  identifyLatencyBottleneck(metrics) {
    // Heuristic to identify latency source
    if (metrics.resources.cpu.peak > 80)
      return 'CPU';
    if (metrics.resources.memory.peak > 80)
      return 'MEMORY';
    if (metrics.latency.p99 > metrics.latency.p95 * 2)
      return 'DATABASE';
    return 'NETWORK';
  }
  calculateSeverity(current, threshold) {
    const ratio = current / threshold;
    if (ratio > 1.5)
      return 'CRITICAL';
    if (ratio > 1.25)
      return 'HIGH';
    if (ratio > 1.1)
      return 'MEDIUM';
    return 'LOW';
  }
  // ============================================================================
  // SLA Validation
  // ============================================================================
  async validateSLA(metadata) {
    const { metrics, thresholds } = metadata;
    console.log('Validating SLA thresholds');
    const violations = [];
    // Check P95 latency
    if (metrics.latency.p95 > thresholds.maxLatencyP95) {
      violations.push({
        metric: 'latency_p95',
        threshold: thresholds.maxLatencyP95,
        actual: metrics.latency.p95,
        violation: ((metrics.latency.p95 - thresholds.maxLatencyP95) / thresholds.maxLatencyP95) * 100,
        severity: this.calculateViolationSeverity(metrics.latency.p95, thresholds.maxLatencyP95),
        duration: 0 // Would be calculated from monitoring data
      });
    }
    // Check P99 latency
    if (metrics.latency.p99 > thresholds.maxLatencyP99) {
      violations.push({
        metric: 'latency_p99',
        threshold: thresholds.maxLatencyP99,
        actual: metrics.latency.p99,
        violation: ((metrics.latency.p99 - thresholds.maxLatencyP99) / thresholds.maxLatencyP99) * 100,
        severity: this.calculateViolationSeverity(metrics.latency.p99, thresholds.maxLatencyP99),
        duration: 0
      });
    }
    // Check throughput
    if (metrics.throughput.requestsPerSecond < thresholds.minThroughput) {
      violations.push({
        metric: 'throughput',
        threshold: thresholds.minThroughput,
        actual: metrics.throughput.requestsPerSecond,
        violation: ((thresholds.minThroughput - metrics.throughput.requestsPerSecond) / thresholds.minThroughput) * 100,
        severity: 'HIGH',
        duration: 0
      });
    }
    // Check error rate
    if (metrics.requests.errorRate > thresholds.maxErrorRate * 100) {
      violations.push({
        metric: 'error_rate',
        threshold: thresholds.maxErrorRate * 100,
        actual: metrics.requests.errorRate,
        violation: ((metrics.requests.errorRate - thresholds.maxErrorRate * 100) / (thresholds.maxErrorRate * 100)) * 100,
        severity: 'CRITICAL',
        duration: 0
      });
    }
    // Emit SLA violation events
    if (violations.length > 0) {
      this.emitEvent('performance.sla.violated', {
        violationCount: violations.length,
        violations
      }, 'critical');
    }
    return {
      passed: violations.length === 0,
      violations
    };
  }
  async checkSLAViolationsAsync(metrics, thresholds) {
    const result = await this.validateSLA({ metrics, thresholds });
    return result.violations;
  }
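  // Units note: thresholds.maxErrorRate is a fraction (default 0.01 = 1%) while
  // metrics.requests.errorRate is a percentage, hence the `* 100` scaling in the
  // error-rate checks. Example: an errorRate of 2 (i.e. 2%) against the default
  // threshold violates by ((2 - 1) / 1) * 100 = 100%.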
  checkSLAViolations(metrics, thresholds) {
    // Synchronous version for internal use
    const violations = [];
    // Check P95 latency
    if (metrics.latency.p95 > thresholds.maxLatencyP95) {
      violations.push({
        metric: 'latency_p95',
        threshold: thresholds.maxLatencyP95,
        actual: metrics.latency.p95,
        violation: ((metrics.latency.p95 - thresholds.maxLatencyP95) / thresholds.maxLatencyP95) * 100,
        severity: this.calculateViolationSeverity(metrics.latency.p95, thresholds.maxLatencyP95),
        duration: 0
      });
    }
    // Check P99 latency
    if (metrics.latency.p99 > thresholds.maxLatencyP99) {
      violations.push({
        metric: 'latency_p99',
        threshold: thresholds.maxLatencyP99,
        actual: metrics.latency.p99,
        violation: ((metrics.latency.p99 - thresholds.maxLatencyP99) / thresholds.maxLatencyP99) * 100,
        severity: this.calculateViolationSeverity(metrics.latency.p99, thresholds.maxLatencyP99),
        duration: 0
      });
    }
    // Check throughput
    if (metrics.throughput.requestsPerSecond < thresholds.minThroughput) {
      violations.push({
        metric: 'throughput',
        threshold: thresholds.minThroughput,
        actual: metrics.throughput.requestsPerSecond,
        violation: ((thresholds.minThroughput - metrics.throughput.requestsPerSecond) / thresholds.minThroughput) * 100,
        severity: 'HIGH',
        duration: 0
      });
    }
    // Check error rate
    if (metrics.requests.errorRate > thresholds.maxErrorRate * 100) {
      violations.push({
        metric: 'error_rate',
        threshold: thresholds.maxErrorRate * 100,
        actual: metrics.requests.errorRate,
        violation: ((metrics.requests.errorRate - thresholds.maxErrorRate * 100) / (thresholds.maxErrorRate * 100)) * 100,
        severity: 'CRITICAL',
        duration: 0
      });
    }
    return violations;
  }
  calculateViolationSeverity(actual, threshold) {
    return this.calculateSeverity(actual, threshold);
  }
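  // Worked example: a measured P95 of 600ms against the default 500ms threshold
  // yields violation = ((600 - 500) / 500) * 100 = 20%, and a severity ratio of
  // 600 / 500 = 1.2, which calculateViolationSeverity() maps to MEDIUM.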
  // ============================================================================
  // Performance Regression Detection
  // ============================================================================
  async detectRegressions(metadata) {
    const { currentMetrics, baselineId } = metadata;
    console.log(`Detecting performance regressions against baseline ${baselineId}`);
    // Retrieve baseline
    const baseline = this.baselines.get(baselineId);
    if (!baseline) {
      throw new Error(`Baseline ${baselineId} not found`);
    }
    const regressions = [];
    const improvements = [];
    // Compare latency
    const latencyDelta = ((currentMetrics.latency.p95 - baseline.metrics.latency.p95) / baseline.metrics.latency.p95);
    if (latencyDelta > 0.1) { // 10% degradation threshold
      regressions.push({
        metric: 'latency_p95',
        baseline: baseline.metrics.latency.p95,
        current: currentMetrics.latency.p95,
        degradation: latencyDelta * 100,
        severity: this.calculateRegressionSeverity(latencyDelta)
      });
    }
    else if (latencyDelta < -0.1) { // 10% improvement
      improvements.push({
        metric: 'latency_p95',
        baseline: baseline.metrics.latency.p95,
        current: currentMetrics.latency.p95,
        improvement: Math.abs(latencyDelta) * 100
      });
    }
    // Compare throughput
    const throughputDelta = ((currentMetrics.throughput.requestsPerSecond - baseline.metrics.throughput.requestsPerSecond) / baseline.metrics.throughput.requestsPerSecond);
    if (throughputDelta < -0.1) { // 10% degradation
      regressions.push({
        metric: 'throughput',
        baseline: baseline.metrics.throughput.requestsPerSecond,
        current: currentMetrics.throughput.requestsPerSecond,
        degradation: Math.abs(throughputDelta) * 100,
        severity: this.calculateRegressionSeverity(Math.abs(throughputDelta))
      });
    }
    else if (throughputDelta > 0.1) { // 10% improvement
      improvements.push({
        metric: 'throughput',
        baseline: baseline.metrics.throughput.requestsPerSecond,
        current: currentMetrics.throughput.requestsPerSecond,
        improvement: throughputDelta * 100
      });
    }
    // Compare error rate
    const errorRateDelta = ((currentMetrics.requests.errorRate - baseline.metrics.requests.errorRate) / baseline.metrics.requests.errorRate);
    if (errorRateDelta > 0.2) { // 20% increase in errors
      regressions.push({
        metric: 'error_rate',
        baseline: baseline.metrics.requests.errorRate,
        current: currentMetrics.requests.errorRate,
        degradation: errorRateDelta * 100,
        severity: 'CRITICAL'
      });
    }
    const verdict = regressions.length === 0
      ? 'PASS'
      : regressions.some(r => r.severity === 'CRITICAL') ? 'FAIL' : 'WARNING';
    // Store regression analysis
    await this.memoryStore.store('aqe/performance/regressions/latest', {
      current: currentMetrics,
      baseline: baseline.metrics,
      regressions,
      improvements,
      verdict,
      analyzedAt: new Date()
    });
    // Emit regression detected events
    if (regressions.length > 0) {
      this.emitEvent('performance.regression.detected', {
        count: regressions.length,
        severity: verdict,
        regressions
      }, verdict === 'FAIL' ? 'critical' : 'high');
    }
    return {
      current: currentMetrics,
      baseline: baseline.metrics,
      regressions,
      improvements,
      verdict
    };
  }
  calculateRegressionSeverity(degradation) {
    if (degradation > 0.5)
      return 'CRITICAL'; // 50% degradation
    if (degradation > 0.3)
      return 'HIGH'; // 30% degradation
    if (degradation > 0.15)
      return 'MEDIUM'; // 15% degradation
    return 'LOW';
  }
  // ============================================================================
  // Baseline Management
  // ============================================================================
  async establishBaseline(metadata) {
    const { metrics, version, environment } = metadata;
    const baseline = {
      timestamp: new Date(),
      metrics,
      loadProfile: this.config.loadProfile,
      version,
      environment
    };
    const baselineId = `${version}-${environment}`;
    this.baselines.set(baselineId, baseline);
    // Store baseline
    await this.memoryStore.store(`aqe/performance/baselines/${baselineId}`, baseline);
    console.log(`Established performance baseline ${baselineId}`);
    return baseline;
  }
  async loadBaselines() {
    try {
      // Load baselines from memory
      const storedBaselines = await this.memoryStore.retrieve('aqe/performance/baselines');
      if (storedBaselines) {
        for (const [id, baseline] of Object.entries(storedBaselines)) {
          this.baselines.set(id, baseline);
        }
        console.log(`Loaded ${this.baselines.size} performance baselines`);
      }
    }
    catch (error) {
      console.warn('Could not load baselines:', error);
    }
  }
  // ============================================================================
  // Load Pattern Generation
  // ============================================================================
  async generateLoadPattern(metadata) {
    const { pattern, virtualUsers, duration } = metadata;
    const loadProfile = {
      virtualUsers: virtualUsers || this.config.loadProfile.virtualUsers,
      duration: duration || this.config.loadProfile.duration,
      rampUpTime: Math.floor((duration || this.config.loadProfile.duration) * 0.1),
      pattern: pattern || this.config.loadProfile.pattern
    };
    console.log(`Generated ${pattern} load pattern with ${virtualUsers} VUs`);
    return loadProfile;
  }
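  // Worked example: with a baseline P95 of 400ms and a current P95 of 480ms,
  // latencyDelta = (480 - 400) / 400 = 0.2, so a regression is recorded with
  // degradation 20% and severity MEDIUM (0.15 < 0.2 <= 0.3). Since no CRITICAL
  // regression is present, the overall verdict would be WARNING.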
  // ============================================================================
  // Helper Methods
  // ============================================================================
  async initializeLoadTestingClient() {
    const tool = this.config.tools.loadTesting;
    console.log(`Initializing ${tool} load testing client`);
    // In real implementation, initialize actual client
    this.loadTestingClient = { tool, initialized: true };
  }
  async initializeMonitoringClient() {
    const monitoring = this.config.tools.monitoring;
    console.log(`Initializing monitoring clients: ${monitoring.join(', ')}`);
    // In real implementation, initialize actual monitoring clients
    this.monitoringClient = { platforms: monitoring, initialized: true };
  }
  registerPerformanceEventHandlers() {
    // Listen for test execution requests from other agents
    this.registerEventHandler({
      eventType: 'test.execution.complete',
      handler: async (event) => {
        // Automatically run performance tests after functional tests
        console.log('Functional tests completed, considering performance test run');
      }
    });
  }
  parseTestConfig(metadata) {
    return {
      targetUrl: metadata.targetUrl || 'http://localhost:3000',
      loadProfile: metadata.loadProfile || this.config.loadProfile,
      thresholds: metadata.thresholds || this.config.thresholds,
      monitoring: metadata.monitoring
    };
  }
  async storeTestResults(testId, result) {
    // Store in active tests
    this.activeTests.set(testId, result);
    // Store in memory for other agents
    await this.memoryStore.store(`aqe/performance/results/${testId}`, result);
    // Store in shared memory
    await this.storeSharedMemory('latest-test', {
      testId,
      timestamp: new Date(),
      passed: result.slaViolations.length === 0,
      metrics: result.metrics
    });
  }
  generateRecommendations(metrics, bottlenecks, violations) {
    const recommendations = [];
    // Recommendations based on bottlenecks
    for (const bottleneck of bottlenecks) {
      recommendations.push(...bottleneck.remediation);
    }
    // Recommendations based on SLA violations
    if (violations.length > 0) {
      recommendations.push('Review and optimize critical user paths');
      recommendations.push('Consider infrastructure scaling');
    }
    // General recommendations based on metrics
    if (metrics.requests.errorRate > 1) {
      recommendations.push('Investigate and fix error sources to reduce error rate');
    }
    if (metrics.latency.p99 > metrics.latency.p95 * 2) {
      recommendations.push('Large P99 tail latency detected - investigate outliers');
    }
    // Deduplicate recommendations
    return Array.from(new Set(recommendations));
  }
  async analyzePerformance(metadata) {
    const { testId } = metadata;
    // Retrieve test results
    const result = this.activeTests.get(testId) ||
      await this.memoryStore.retrieve(`aqe/performance/results/${testId}`);
    if (!result) {
      throw new Error(`Test results not found for ${testId}`);
    }
    return {
      summary: {
        passed: result.slaViolations.length === 0,
        duration: result.duration,
        totalRequests: result.metrics.requests.total,
        errorRate: result.metrics.requests.errorRate,
        p95Latency: result.metrics.latency.p95
      },
      bottlenecks: result.bottlenecks,
      violations: result.slaViolations,
      recommendations: result.recommendations
    };
  }
  getDefaultThresholds() {
    return {
      maxLatencyP95: 500,
      maxLatencyP99: 1000,
      minThroughput: 1000,
      maxErrorRate: 0.01,
      maxCpuUsage: 80,
      maxMemoryUsage: 85
    };
  }
}
exports.PerformanceTesterAgent = PerformanceTesterAgent;
//# sourceMappingURL=PerformanceTesterAgent.js.map
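// Illustrative end-to-end flow (a sketch; the surrounding agent/config wiring is
// an assumption, while the task types and payload fields come from this file):
//
//   const agent = new PerformanceTesterAgent(config);
//   // 1. Establish a baseline on a known-good build
//   await agent.performTask({ id: 'b-1', type: 'establish-baseline',
//     payload: { metrics: baselineMetrics, version: '1.4.0', environment: 'staging' } });
//   // 2. Run a load test against the candidate build
//   const result = await agent.performTask({ id: 't-1', type: 'run-load-test',
//     payload: { targetUrl: 'https://staging.example.com' } });
//   // 3. Compare against the stored baseline (baselineId is `${version}-${environment}`)
//   const report = await agent.performTask({ id: 'r-1', type: 'detect-regressions',
//     payload: { currentMetrics: result.metrics, baselineId: '1.4.0-staging' } });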