// ctrlshiftleft — AI-powered toolkit for embedding QA and security
// testing into development workflows.
// (187 lines, 163 loc, 7.49 kB — text/typescript)
import { Command } from 'commander';
import chalk from 'chalk';
import ora from 'ora';
import path from 'path';
import { TestRunner, TestFileResult, TestError } from '../core/testRunner';
import { isValidTestPath } from '../utils/fileUtils';
import { table } from 'table';
import figures from 'figures';
/**
 * Registers the `run` subcommand on the given commander program.
 *
 * The subcommand executes previously generated end-to-end tests via the
 * project's {@link TestRunner}, streaming per-test progress when
 * `--verbose` is set and printing a summary table plus failure details
 * at the end. Exits the process with code 1 on any test failure or
 * unexpected error.
 *
 * @param program - The commander program to attach the `run` command to.
 */
export function runCommand(program: Command): void {
  program
    .command('run')
    .description('Execute generated end-to-end tests')
    .argument('[test-path]', 'Path to test file or directory', './tests')
    .option('-b, --browser <browser>', 'Browser to run tests in', 'chromium')
    // NOTE(review): the '-h' shorthand collides with commander's built-in
    // '-h, --help' flag — confirm intended; dropping the short flag would
    // change the CLI surface, so it is kept as-is here.
    .option('-h, --headless', 'Run browser in headless mode', true)
    // NOTE(review): help text says seconds; confirm TestRunner expects
    // seconds rather than milliseconds.
    .option('-t, --timeout <timeout>', 'Test timeout in seconds', '30')
    .option('-r, --reporter <reporter>', 'Test reporter format', 'list')
    .option('-w, --workers <workers>', 'Number of parallel workers to use', '1')
    .option('-v, --verbose', 'Show detailed test output', false)
    .option('-f, --filter <filter>', 'Filter tests by name pattern')
    .option('--retries <retries>', 'Number of retries for failed tests', '0')
    .option('--update-snapshots', 'Update snapshots', false)
    .option('--type <type>', 'Filter tests by type (ui, api, security)')
    .action(async (testPath: string, options) => {
      const spinner = ora('Preparing test environment...').start();
      try {
        // Validate test path before doing any work.
        const absoluteTestPath = path.resolve(process.cwd(), testPath);
        if (!await isValidTestPath(absoluteTestPath)) {
          spinner.fail(`Invalid test path: ${testPath}`);
          return;
        }

        // Parse numeric options once, up front, so a typo such as
        // `--workers abc` fails with a clear message instead of a NaN
        // silently reaching the runner.
        const timeout = Number.parseInt(options.timeout, 10);
        const workers = Number.parseInt(options.workers, 10);
        const retries = Number.parseInt(options.retries, 10);
        if ([timeout, workers, retries].some(Number.isNaN)) {
          spinner.fail('Options --timeout, --workers and --retries must be integers');
          process.exitCode = 1;
          return;
        }

        // Build filter pattern from the --type option if specified; the
        // mapped label is OR-ed (regex '|') onto any --filter pattern.
        let filter = options.filter || '';
        if (options.type) {
          const typeMappings: Record<string, string> = {
            'ui': 'UI/UX',
            'api': 'API',
            'security': 'Security'
          };
          const typeFilter = typeMappings[options.type.toLowerCase()];
          if (typeFilter) {
            filter = filter ? `${filter}|${typeFilter}` : typeFilter;
          }
        }

        // Initialize test runner. The 'false' string check is defensive:
        // for a boolean commander flag the value is never a string, but a
        // caller passing options programmatically might supply one.
        const runner = new TestRunner({
          browser: options.browser,
          headless: options.headless === 'false' ? false : Boolean(options.headless),
          timeout,
          reporter: options.reporter,
          workers,
          filter,
          retries,
          verbose: options.verbose,
          updateSnapshots: options.updateSnapshots
        });

        // Set up event listeners for real-time feedback (verbose only).
        if (options.verbose) {
          runner.on('run:start', ({ testFiles }) => {
            spinner.stop();
            // Use the parsed integer here — the raw option value is a
            // string, and comparing it with `> 1` relied on coercion.
            console.log(chalk.blue(`\nRunning ${testFiles.length} test files${workers > 1 ? ` with ${workers} workers` : ''}...`));
          });
          runner.on('file:start', ({ path: filePath }) => {
            console.log(chalk.cyan(`\nRunning tests in ${path.relative(process.cwd(), filePath)}...`));
          });
          runner.on('test:start', ({ path: filePath, name }) => {
            console.log(chalk.gray(`  ${figures.play} ${name}`));
          });
          runner.on('test:end', (result) => {
            const icon = result.status === 'passed' ? chalk.green(figures.tick) :
              result.status === 'failed' ? chalk.red(figures.cross) : chalk.yellow(figures.warning);
            console.log(`  ${icon} ${result.name} (${Math.round(result.duration)}ms)`);
            if (result.status === 'failed' && result.error) {
              console.log(chalk.red(`    ${result.error.message}`));
            }
          });
          runner.on('error', (error) => {
            console.error(chalk.red(`Error in ${path.relative(process.cwd(), error.testPath)}: ${error.message}`));
          });
        }

        spinner.text = 'Running tests...';
        const results = await runner.runTests(absoluteTestPath);

        // Clear spinner before printing the report.
        spinner.stop();

        // Print final results banner.
        console.log('\n' + chalk.bgBlue.white(' TEST RESULTS ') + '\n');

        // Format duration to be more readable (ms below 1s, else seconds).
        const formatDuration = (ms: number): string => {
          if (ms < 1000) return `${ms.toFixed(0)}ms`;
          return `${(ms / 1000).toFixed(1)}s`;
        };

        // Print summary table.
        const summaryData = [
          [chalk.bold('Total'), chalk.bold('Passed'), chalk.bold('Failed'), chalk.bold('Skipped'), chalk.bold('Duration')],
          [
            results.total.toString(),
            chalk.green(results.passed.toString()),
            results.failed > 0 ? chalk.red(results.failed.toString()) : '0',
            results.skipped > 0 ? chalk.yellow(results.skipped.toString()) : '0',
            formatDuration(results.duration)
          ]
        ];
        console.log(table(summaryData, {
          border: {
            topBody: '─',
            topJoin: '┬',
            topLeft: '┌',
            topRight: '┐',
            bottomBody: '─',
            bottomJoin: '┴',
            bottomLeft: '└',
            bottomRight: '┘',
            bodyLeft: '│',
            bodyRight: '│',
            bodyJoin: '│',
            joinBody: '─',
            joinLeft: '├',
            joinRight: '┤',
            joinJoin: '┼',
          }
        }));

        // If there are failures, print details grouped by file.
        if (results.failed > 0) {
          console.log(chalk.red.bold('\nTest Failures:'));
          const fileResults = results.fileResults.filter(file => file.failed > 0);
          fileResults.forEach((fileResult, index) => {
            console.log(`\n${chalk.bold(`${index + 1}. ${path.relative(process.cwd(), fileResult.path)}`)}`);
            // Print failed tests in this file.
            fileResult.tests
              .filter(test => test.status === 'failed')
              .forEach(test => {
                console.log(chalk.red(`  ${figures.cross} ${test.name}`));
                if (test.error) {
                  console.log(chalk.gray(`    ${test.error.message.replace(/\n/g, '\n    ')}`));
                }
              });
            // File-level errors (e.g. the file failed to load) when no
            // individual tests were recorded.
            if (fileResult.tests.length === 0 && fileResult.errors.length > 0) {
              fileResult.errors.forEach(error => {
                console.log(chalk.red(`  ${figures.cross} ${error.message.split('\n')[0]}`));
                if (options.verbose && error.stack) {
                  console.log(chalk.gray(`    ${error.stack.replace(/\n/g, '\n    ')}`));
                }
              });
            }
          });
        }

        // Print success message or exit with failure status.
        if (results.failed === 0) {
          console.log(chalk.green.bold('\n✓ All tests passed!'));
        } else {
          console.log(chalk.red.bold(`\n✗ ${results.failed} test(s) failed.`));
          process.exit(1);
        }
      } catch (error) {
        // Unexpected failure (runner crash, FS error, ...): report and
        // exit non-zero so CI pipelines notice.
        spinner.fail(`Test execution failed: ${(error as Error).message}`);
        console.error(chalk.red((error as Error).stack));
        process.exit(1);
      }
    });
}