usezap-cli
Zap CLI - Command-line interface for Zap API client
/**
* Performance Testing CLI Command for Zap
*
* Provides CLI interface for running performance tests on Zap collections
* with configurable load patterns and comprehensive reporting.
*/
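// Illustrative invocations, assuming the CLI is installed as `zap` and
// `./api-tests` is a Zap collection directory (the paths here are hypothetical):
//   zap performance ./api-tests --users 10 --duration 2m
//   zap performance ./api-tests --load-pattern ramp --users 50 --report-html report.html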
const fs = require('fs');
const path = require('path');
const { PerformanceRunner } = require('@usezap/performance');
const { getEnvVars } = require('../utils/common');
const { createCollectionJsonFromPathname } = require('../utils/collection');
const makeHtmlOutput = require('../reporters/html');
const makeJUnitOutput = require('../reporters/junit');
const handler = async (argv) => {
try {
const {
collection: collectionPath,
environment: envPath,
users = 1,
duration = '30s',
'ramp-up': rampUpTime = '10s',
'load-pattern': loadPattern = 'constant',
'think-time': thinkTime = 0,
'max-rps': maxRequestsPerSecond = null,
output = null,
'report-html': reportHtml = null,
'report-junit': reportJunit = null,
verbose = false
} = argv;
// Validate collection directory/file
let collectionPathResolved = path.resolve(collectionPath);
let isDirectory = false;
if (!fs.existsSync(collectionPathResolved)) {
console.error(`Collection path not found: ${collectionPathResolved}`);
process.exit(1);
}
// Check if it's a directory (Zap collection) or file
const stats = fs.statSync(collectionPathResolved);
if (stats.isDirectory()) {
// Zap collection directory - check for zap.json
const zapJsonPath = path.join(collectionPathResolved, 'zap.json');
if (!fs.existsSync(zapJsonPath)) {
console.error(`Invalid Zap collection: zap.json not found in ${collectionPathResolved}`);
process.exit(1);
}
isDirectory = true;
} else {
// Individual file - check extension
const ext = path.extname(collectionPathResolved);
if (!['.json', '.zap'].includes(ext)) {
console.error(`Unsupported collection file format: ${ext}. Supported formats: .json, .zap, or Zap collection directory`);
process.exit(1);
}
}
// Load collection
console.log(`📂 Loading collection: ${collectionPathResolved}`);
let collection;
if (isDirectory) {
// Load Zap collection directory
collection = createCollectionJsonFromPathname(collectionPathResolved);
} else if (path.extname(collectionPathResolved) === '.json') {
// Load JSON collection file
collection = JSON.parse(fs.readFileSync(collectionPathResolved, 'utf8'));
} else {
console.error(`Direct .zap file loading not yet supported. Please provide a collection directory or JSON file.`);
process.exit(1);
}
// Load environment if provided
let environment = {};
if (envPath) {
if (!fs.existsSync(envPath)) {
console.error(`Environment file not found: ${envPath}`);
process.exit(1);
}
environment = getEnvVars(envPath);
}
// Configure performance runner
const performanceConfig = {
users: parseInt(users),
duration,
rampUpTime,
loadPattern,
thinkTime: parseInt(thinkTime),
maxRequestsPerSecond: maxRequestsPerSecond ? parseInt(maxRequestsPerSecond) : null
};
console.log(`🚀 Starting performance test with ${users} virtual users for ${duration}`);
console.log(`📈 Load pattern: ${loadPattern}`);
if (verbose) {
console.log('Configuration:', JSON.stringify(performanceConfig, null, 2));
}
// Create and configure performance runner
const runner = new PerformanceRunner(performanceConfig);
// Set up real-time event listeners
runner.on('testStart', (data) => {
console.log(`⏱️ Test started at ${data.timestamp}`);
});
runner.on('userAdded', (data) => {
if (verbose) {
console.log(`👤 Added user ${data.userId}, active users: ${data.activeUsers}`);
}
});
runner.on('metrics', (metrics) => {
if (verbose) {
console.log(`📊 User ${metrics.userId}: ${metrics.requestCount} requests completed`);
}
});
runner.on('error', (error) => {
if (verbose) {
console.log(`❌ Error from user ${error.userId}: ${error.error}`);
}
});
runner.on('testEnd', () => {
console.log('✅ Performance test completed');
});
// Execute performance test
const results = await runner.executePerformanceTest(collection, environment);
// Display summary results
console.log('\n📊 Performance Test Results:');
console.log('='.repeat(50));
console.log(`Total Duration: ${(results.summary.totalDuration / 1000).toFixed(2)}s`);
console.log(`Total Requests: ${results.summary.totalRequests}`);
console.log(`Requests/Second: ${results.summary.requestsPerSecond.toFixed(2)}`);
console.log(`Error Rate: ${results.summary.errorRate.toFixed(2)}%`);
console.log(`Throughput: ${(results.summary.throughput / 1024).toFixed(2)} KB/s`);
console.log('\n⏱️ Response Time Statistics:');
console.log(`Min: ${results.responseTime.min}ms`);
console.log(`Max: ${results.responseTime.max}ms`);
console.log(`Average: ${results.responseTime.avg.toFixed(2)}ms`);
console.log(`50th percentile: ${results.responseTime.p50}ms`);
console.log(`95th percentile: ${results.responseTime.p95}ms`);
console.log(`99th percentile: ${results.responseTime.p99}ms`);
if (Object.keys(results.errors).length > 0) {
console.log('\n❌ Error Summary:');
Object.entries(results.errors).forEach(([error, count]) => {
console.log(`${error}: ${count} occurrences`);
});
}
// Generate reports
if (output || reportHtml || reportJunit) {
console.log('\n📄 Generating reports...');
// Convert results to compatible format for existing reporters
const compatibleResults = convertToReportFormat(results, collection);
if (reportHtml) {
await generatePerformanceHtmlReport(results, reportHtml);
console.log(`📊 HTML performance report: ${reportHtml}`);
}
if (reportJunit) {
makeJUnitOutput(compatibleResults, reportJunit);
console.log(`📋 JUnit report: ${reportJunit}`);
}
if (output) {
fs.writeFileSync(output, JSON.stringify(results, null, 2));
console.log(`💾 JSON results: ${output}`);
}
}
console.log('\n✨ Performance testing completed successfully!');
} catch (error) {
console.error('❌ Performance test failed:', error.message);
if (argv.verbose) {
console.error(error.stack);
}
process.exit(1);
}
};
/**
* Convert performance results to format compatible with existing reporters
*/
function convertToReportFormat(performanceResults, collection) {
const requests = performanceResults.timeline.map((timepoint, index) => ({
description: `Performance Test - Timepoint ${index + 1}`,
suitename: collection.name || 'Performance Test',
request: {
method: 'PERFORMANCE',
url: 'performance-test'
},
assertionResults: [
{
lhsExpr: 'avg_response_time',
rhsExpr: `lt ${timepoint.avgResponseTime + 100}`, // Allow some variance
status: timepoint.avgResponseTime < 1000 ? 'pass' : 'fail',
error: timepoint.avgResponseTime >= 1000 ? 'Response time exceeded threshold' : null
},
{
lhsExpr: 'requests_per_second',
rhsExpr: `gt 0`,
status: timepoint.requestsPerSecond > 0 ? 'pass' : 'fail'
}
],
runtime: timepoint.avgResponseTime / 1000,
timestamp: timepoint.timestamp
}));
return requests;
}
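// For reference, each entry returned above has roughly this shape (the values
// shown are hypothetical):
// {
//   description: 'Performance Test - Timepoint 1',
//   suitename: 'My Collection',
//   request: { method: 'PERFORMANCE', url: 'performance-test' },
//   assertionResults: [
//     { lhsExpr: 'avg_response_time', rhsExpr: 'lt 345', status: 'pass', error: null },
//     ...
//   ],
//   runtime: 0.245,
//   timestamp: 1712345678901
// }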
/**
* Generate enhanced HTML report with performance visualizations
*/
async function generatePerformanceHtmlReport(results, outputPath) {
const templatePath = path.join(__dirname, '../../templates/performance-report.html');
// Check if performance template exists, otherwise use basic template
let template;
if (fs.existsSync(templatePath)) {
template = fs.readFileSync(templatePath, 'utf8');
} else {
// Fallback to basic HTML template with performance data
template = generateBasicPerformanceTemplate();
}
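// Substitute the placeholder tokens in the template. Note that String.prototype.replace
// with a string pattern only replaces the first occurrence, so a custom
// templates/performance-report.html is expected to contain __PERFORMANCE_RESULTS__,
// __SUMMARY_DATA__ and __TIMELINE_DATA__ once each.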
const htmlContent = template
.replace('__PERFORMANCE_RESULTS__', JSON.stringify(results, null, 2))
.replace('__SUMMARY_DATA__', generateSummaryHtml(results))
.replace('__TIMELINE_DATA__', JSON.stringify(results.timeline));
fs.writeFileSync(outputPath, htmlContent);
}
/**
* Generate basic performance HTML template
*/
function generateBasicPerformanceTemplate() {
return `
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Zap Performance Test Report</title>
<script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
<style>
body { font-family: Arial, sans-serif; margin: 20px; }
.summary { display: grid; grid-template-columns: repeat(auto-fit, minmax(200px, 1fr)); gap: 20px; margin-bottom: 30px; }
.metric { background: #f5f5f5; padding: 20px; border-radius: 8px; text-align: center; }
.metric h3 { margin: 0 0 10px 0; color: #333; }
.metric .value { font-size: 24px; font-weight: bold; color: #2196F3; }
.chart-container { width: 100%; height: 400px; margin: 20px 0; }
.error-summary { background: #ffe6e6; padding: 15px; border-radius: 8px; margin: 20px 0; }
</style>
</head>
<body>
<h1>🚀 Zap Performance Test Report</h1>
<div class="summary">__SUMMARY_DATA__</div>
<div class="chart-container">
<canvas id="timelineChart"></canvas>
</div>
<script>
const timelineData = __TIMELINE_DATA__;
const ctx = document.getElementById('timelineChart').getContext('2d');
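// Plot requests per second (left y-axis) and average response time (right y1 axis)
// over the course of the test run.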
new Chart(ctx, {
type: 'line',
data: {
labels: timelineData.map(d => new Date(d.timestamp).toLocaleTimeString()),
datasets: [{
label: 'Requests/Second',
data: timelineData.map(d => d.requestsPerSecond),
borderColor: '#2196F3',
tension: 0.1
}, {
label: 'Avg Response Time (ms)',
data: timelineData.map(d => d.avgResponseTime),
borderColor: '#FF9800',
tension: 0.1,
yAxisID: 'y1'
}]
},
options: {
responsive: true,
interaction: { intersect: false },
scales: {
y: { type: 'linear', display: true, position: 'left' },
y1: { type: 'linear', display: true, position: 'right' }
}
}
});
</script>
</body>
</html>`;
}
/**
* Generate summary HTML for performance metrics
*/
function generateSummaryHtml(results) {
return `
<div class="metric">
<h3>Total Requests</h3>
<span class="value">${results.summary.totalRequests}</span>
</div>
<div class="metric">
<h3>Requests/Second</h3>
<span class="value">${results.summary.requestsPerSecond.toFixed(2)}</span>
</div>
<div class="metric">
<h3>Avg Response Time</h3>
<span class="value">${results.responseTime.avg.toFixed(2)}ms</span>
</div>
<div class="metric">
<h3>Error Rate</h3>
<span class="value">${results.summary.errorRate.toFixed(2)}%</span>
</div>
<div class="metric">
<h3>95th Percentile</h3>
<span class="value">${results.responseTime.p95}ms</span>
</div>
<div class="metric">
<h3>Throughput</h3>
<span class="value">${(results.summary.throughput / 1024).toFixed(2)} KB/s</span>
</div>
`;
}
module.exports = {
command: 'performance <collection>',
describe: 'Run performance tests on a Zap collection',
builder: (yargs) => {
return yargs
.positional('collection', {
describe: 'Path to the Zap collection directory or file',
type: 'string'
})
.option('environment', {
alias: 'e',
describe: 'Environment file path',
type: 'string'
})
.option('users', {
alias: 'u',
describe: 'Number of virtual users',
type: 'number',
default: 1
})
.option('duration', {
alias: 'd',
describe: 'Test duration (e.g., 30s, 5m, 1h)',
type: 'string',
default: '30s'
})
.option('ramp-up', {
describe: 'Ramp-up time for load pattern',
type: 'string',
default: '10s'
})
.option('load-pattern', {
describe: 'Load pattern: constant, ramp, spike',
choices: ['constant', 'ramp', 'spike'],
default: 'constant'
})
.option('think-time', {
describe: 'Think time between requests (ms)',
type: 'number',
default: 0
})
.option('max-rps', {
describe: 'Maximum requests per second',
type: 'number'
})
.option('output', {
alias: 'o',
describe: 'Output file for JSON results',
type: 'string'
})
.option('report-html', {
describe: 'Generate HTML performance report',
type: 'string'
})
.option('report-junit', {
describe: 'Generate JUnit XML report',
type: 'string'
})
.option('verbose', {
alias: 'v',
describe: 'Verbose output',
type: 'boolean',
default: false
})
.example('$0 performance ./api-tests --users 10 --duration 2m', 'Run with 10 users for 2 minutes')
.example('$0 performance ./api-tests --load-pattern ramp --users 50 --report-html report.html', 'Ramp test with HTML report');
},
handler
};