/**
 * testify-universal-cli
 *
 * Universal interactive CLI tool for scanning and executing tests across
 * multiple programming languages.
 */
import chalk from 'chalk';
import { execa } from 'execa';
import inquirer from 'inquirer';
import fs from 'node:fs/promises';
import ora from 'ora';
import path from 'path';
import { getGoRunner } from '../runners/golang.js';
import { getNodeRunner } from '../runners/node.js';
import { getPythonRunner } from '../runners/python.js';
import { getRubyRunner } from '../runners/ruby.js';
import { logger } from './logger.js';
// ASCII art logo printed at the top of every screen redraw.
// NOTE: this is a template literal — line breaks and spacing are rendered verbatim.
const ASCII_LOGO = `
 _______ _ _ __
|__ __| | | (_)/ _|
| | ___ ___| |_ _| |_ _ _
| |/ _ \\/ __| __| | _| | | |
| | __/\\__ \\ |_| | | | |_| |
|_|\\___||___/\\__|_|_| \\__, |
__/ |
|___/
`;
/**
 * Run tests with interactive selection.
 *
 * Renders a looping inquirer menu that lets the user toggle marker filters
 * and individual test files, then executes the chosen tests through the
 * language-specific runner. The terminal is cleared and redrawn on every
 * loop iteration.
 *
 * @param {object} options - Test runner options
 * @param {string} options.language - Repository language
 * @param {Array<{filePath: string, relativePath: string, markers: string[]}>} options.testFiles - Test files with markers
 * @param {string} options.cwd - Current working directory
 * @param {string[]} options.markers - Test markers pre-selected as filters
 * @param {string} options.customRunner - Custom test runner command
 * @param {boolean} options.debug - Enable debug logging
 * @returns {Promise<void>} Resolves when the interactive session ends
 */
export async function runTests(options) {
  const { language, testFiles, cwd, markers = [], customRunner, debug = false } = options;
  // Configure logger verbosity based on the debug flag.
  logger.setDebugMode(debug);
  logger.debug('Running tests with options:', options);
  // Clear the terminal once at startup; console.clear() is unreliable on
  // Windows, so the ESC-c terminal reset sequence is used there instead.
  if (process.platform === 'win32') {
    // Windows-specific clear approach
    console.log('\x1Bc');
  } else {
    // Unix-like systems
    console.clear();
  }
  // Mutable UI state: active marker filters, individually selected files,
  // and the flag that terminates the selection loop.
  let selectedMarkers = [...markers];
  let selectedFiles = [];
  let shouldExit = false;
  // Print the ASCII logo and navigation hint.
  function displayHeader() {
    console.log(chalk.cyan(ASCII_LOGO));
    console.log(chalk.gray('Use arrow keys to navigate, Enter to select, ESC to exit\n'));
  }
  // Clear the terminal and re-print the header (cross-platform).
  function redrawScreen() {
    // console.clear() doesn't work consistently across platforms.
    if (process.platform === 'win32') {
      // Windows-specific clear approach
      console.log('\x1Bc');
    } else {
      // Unix-like systems
      console.clear();
    }
    displayHeader();
  }
  // Initial display of the header (redrawn again at the top of the loop).
  displayHeader();
  logger.debug(`Found ${testFiles.length} test files for language: ${language}`);
  if (testFiles.length === 0) {
    console.log(chalk.yellow('No test files found in the repository.'));
    return;
  }
  // Collect the union of markers across all test files.
  const allMarkers = new Set();
  testFiles.forEach(file => {
    file.markers.forEach(marker => allMarkers.add(marker));
  });
  // Return the test files matching ANY of the given markers
  // (all files when no markers are active).
  const getFilteredTests = (markers) => {
    if (markers.length === 0) {
      return testFiles;
    }
    return testFiles.filter(file =>
      markers.some(marker => file.markers.includes(marker))
    );
  };
  // Label for the execute menu entry (fixed text regardless of selection).
  const getExecuteActionName = (markers, files) => {
    // Always return "Execute Tests" as specified in the requirements
    return 'Execute Tests';
  };
  // Build the inquirer menu choices for the current selection state.
  const getChoices = (filteredTests, selectedMarkers, selectedFiles, allMarkers) => {
    const choices = [];
    // Execution shortcuts appear at the top once something is selected.
    if (selectedMarkers.length > 0 || selectedFiles.length > 0) {
      const executeName = getExecuteActionName(selectedMarkers, selectedFiles);
      choices.push({
        name: chalk.bold.green(`▶ ${executeName}`),
        value: { type: 'execute' },
        short: 'Execute Tests',
      });
      // Option to reset both marker and file selections.
      choices.push({
        name: chalk.red('✕ Clear All Selections'),
        value: { type: 'clear-all' },
        short: 'Clear All',
      });
      // Visual separator before the standing menu entries.
      choices.push(new inquirer.Separator(''));
    }
    // "Run All Tests" is always available.
    choices.push({
      name: chalk.bold('Run All Tests'),
      value: { type: 'run-all' },
      short: 'All Tests',
    });
    // Marker filter toggles, sorted alphabetically.
    if (allMarkers.size > 0) {
      choices.push(new inquirer.Separator(chalk.cyan.bold('── Filter By Marker ──')));
      const markerChoices = [...allMarkers].map(marker => {
        const isSelected = selectedMarkers.includes(marker);
        return {
          name: `${isSelected ? chalk.green('✓ ') : ' '}${marker}`,
          value: { type: 'toggle-marker', marker },
          short: `Filter: ${marker}`,
        };
      });
      markerChoices.sort((a, b) => a.value.marker.localeCompare(b.value.marker));
      choices.push(...markerChoices);
    }
    // Test file toggles (already narrowed by the active marker filters).
    choices.push(new inquirer.Separator(chalk.cyan.bold('── Test Files ──')));
    const testFileChoices = filteredTests.map(file => {
      const isSelected = selectedFiles.some(f => f.filePath === file.filePath);
      return {
        name: `${isSelected ? chalk.green('✓ ') : ' '}${file.relativePath} ${file.markers.length > 0 ? chalk.gray(`[${file.markers.join(', ')}]`) : ''}`,
        value: { type: 'toggle-file', file },
        short: file.relativePath,
      };
    });
    choices.push(...testFileChoices);
    return choices;
  };
  // Main selection loop - repeat until the user executes tests or exits.
  while (!shouldExit) {
    // Redraw the screen at the start of each iteration.
    redrawScreen();
    const filteredTests = getFilteredTests(selectedMarkers);
    // If the active filters match nothing, reset them and re-prompt.
    if (filteredTests.length === 0 && selectedMarkers.length > 0) {
      console.log(chalk.yellow(`No test files found with markers: ${selectedMarkers.join(', ')}`));
      selectedMarkers = [];
      continue;
    }
    // Generate choices based on current state.
    const choices = getChoices(filteredTests, selectedMarkers, selectedFiles, allMarkers);
    // Selection summary banner.
    console.log('\n' + chalk.bgCyan.black(' TESTIFY ') + ' Test Selection\n');
    // Show the active marker filters, if any.
    if (selectedMarkers.length > 0) {
      console.log(chalk.yellow.bold(`Active Filters (${selectedMarkers.length}):`));
      const markerList = selectedMarkers.map(m => chalk.cyan(m)).join(', ');
      console.log(` ${markerList}\n`);
    } else {
      console.log(chalk.gray('No active filters. Select markers below to filter tests.\n'));
    }
    // Show the individually selected files, if any.
    if (selectedFiles.length > 0) {
      console.log(chalk.yellow.bold(`Selected Test Files (${selectedFiles.length}):`));
      selectedFiles.forEach(file => {
        console.log(` ${chalk.green('●')} ${file.relativePath}`);
      });
      console.log('');
    }
    // Prompt for the next action; a rejected prompt (e.g. ESC / Ctrl-C)
    // is handled by the catch below and exits the loop.
    try {
      const { selected } = await inquirer.prompt([
        {
          type: 'list',
          name: 'selected',
          message: 'Select an action:',
          choices,
          pageSize: 15,
          default: 0,
          // Prevent wrapping from top to bottom while navigating.
          loop: false
        },
      ]);
      if (!selected) {
        shouldExit = true;
        continue;
      }
      // Dispatch on the selected action type.
      switch (selected.type) {
        case 'toggle-marker':
          // Add or remove the marker from the active filters.
          if (selectedMarkers.includes(selected.marker)) {
            selectedMarkers = selectedMarkers.filter(m => m !== selected.marker);
          } else {
            selectedMarkers.push(selected.marker);
          }
          break;
        case 'toggle-file':
          // Add or remove the file from the selection.
          // Path comparison is case-insensitive on Windows file systems.
          const compareFilePaths = (path1, path2) => {
            if (process.platform === 'win32') {
              // Case-insensitive comparison for Windows
              return path1.toLowerCase() === path2.toLowerCase();
            } else {
              // Case-sensitive comparison for Unix-like systems
              return path1 === path2;
            }
          };
          const fileIndex = selectedFiles.findIndex(f => compareFilePaths(f.filePath, selected.file.filePath));
          if (fileIndex >= 0) {
            selectedFiles.splice(fileIndex, 1);
          } else {
            selectedFiles.push(selected.file);
          }
          break;
        case 'clear-all':
          // Reset all filters and file selections.
          selectedMarkers = [];
          selectedFiles = [];
          break;
        case 'execute':
          shouldExit = true;
          // Summary of the active filters to show during execution.
          const activeFilters = selectedMarkers.length > 0
            ? `Active Filters: ${selectedMarkers.map(m => chalk.cyan(m)).join(', ')}`
            : 'No active filters';
          // Clear the screen (cross-platform) before the execution view.
          if (process.platform === 'win32') {
            console.log('\x1Bc'); // Windows-specific clear
          } else {
            console.clear(); // Unix-like systems
          }
          displayHeader();
          console.log('\n' + chalk.bgCyan.black(' TESTIFY ') + ' Test Execution\n');
          console.log(chalk.yellow.bold(activeFilters));
          if (selectedFiles.length > 0) {
            // Explicit file selection takes precedence: run each file in turn.
            console.log(chalk.yellow.bold(`\nSelected Test Files (${selectedFiles.length}):`));
            selectedFiles.forEach(file => {
              console.log(` ${chalk.green('●')} ${file.relativePath}`);
            });
            console.log('');
            // One persistent spinner is reused across all selected files.
            const spinner = ora({
              text: `Running ${selectedFiles.length} test files...`,
              color: 'cyan',
            }).start();
            try {
              for (const file of selectedFiles) {
                spinner.text = `Running ${file.relativePath}...`;
                await runSingleTest(file, language, cwd, customRunner, spinner);
              }
              spinner.succeed('All tests completed');
            } catch (error) {
              spinner.fail(`Tests failed: ${error.message}`);
            }
          } else {
            // No individual files selected: run everything matching the filters.
            console.log(chalk.yellow.bold(`\nRunning ${filteredTests.length} test files based on filters\n`));
            const spinner = ora({
              text: 'Executing tests...',
              color: 'cyan',
            }).start();
            try {
              await runSelectedTests(
                filteredTests,
                language,
                cwd,
                selectedMarkers,
                customRunner,
                spinner
              );
            } catch (error) {
              spinner.fail(`Tests failed: ${error.message}`);
            }
          }
          break;
        case 'run-all':
          shouldExit = true;
          // Clear the screen (cross-platform) before the execution view.
          if (process.platform === 'win32') {
            console.log('\x1Bc'); // Windows-specific clear
          } else {
            console.clear(); // Unix-like systems
          }
          displayHeader();
          console.log('\n' + chalk.bgCyan.black(' TESTIFY ') + ' Running All Tests\n');
          console.log(chalk.yellow.bold(`Running all ${testFiles.length} test files\n`));
          // Spinner for the full-suite run.
          const spinner = ora({
            text: `Preparing to run ${testFiles.length} tests...`,
            color: 'cyan',
          }).start();
          try {
            await runSelectedTests(
              testFiles,
              language,
              cwd,
              [],
              customRunner,
              spinner
            );
          } catch (error) {
            spinner.fail(`Tests failed: ${error.message}`);
          }
          break;
        default:
          shouldExit = true;
          break;
      }
    } catch (error) {
      // Inquirer rejects when the prompt is interrupted (e.g. ESC / Ctrl-C);
      // treat that as a request to exit rather than an error.
      shouldExit = true;
    }
  }
}
/**
 * Execute a batch of test files through the language runner and report the
 * outcome on a spinner. The process exits with code 1 when the run fails or
 * the runner throws.
 *
 * @param {Array<{filePath: string, relativePath: string, markers: string[]}>} testFiles - Test files to run
 * @param {string} language - Repository language
 * @param {string} cwd - Current working directory
 * @param {string[]} markers - Test markers to filter by
 * @param {string} customRunner - Custom test runner
 * @param {object} existingSpinner - Existing ora spinner to reuse (optional)
 * @returns {Promise<void>} Resolves once the verdict has been reported
 */
async function runSelectedTests(testFiles, language, cwd, markers, customRunner, existingSpinner) {
  logger.debug(`Running ${testFiles.length} selected tests with markers: ${markers.join(', ') || 'none'}`);
  const runner = getTestRunner(language, customRunner);
  try {
    // Reuse the caller's spinner when one was handed in.
    const spinner = existingSpinner || ora('Running tests...').start();
    const markerSuffix = markers.length > 0 ? ` with markers: ${markers.join(', ')}` : '';
    spinner.text = `Running ${testFiles.length} tests${markerSuffix}`;
    const result = await runner.runAll(testFiles, cwd, markers);
    // The spinner that will display the final pass/fail verdict.
    let statusSpinner = spinner;
    if (result.stdout) {
      // Park the spinner so the raw test output stays readable.
      spinner.stopAndPersist({
        symbol: '📋',
        text: chalk.dim('Test output:')
      });
      console.log(chalk.dim('─'.repeat(80)));
      console.log(result.stdout);
      console.log(chalk.dim('─'.repeat(80)));
      // Without a caller-provided spinner, start a fresh one for the verdict.
      if (!existingSpinner) {
        statusSpinner = ora().start();
      }
    }
    if (result.failed) {
      statusSpinner.fail(chalk.red('Tests failed'));
      process.exit(1);
    } else {
      statusSpinner.succeed(chalk.green('All tests passed'));
    }
  } catch (error) {
    const message = chalk.red(`Error running tests: ${error.message}`);
    if (existingSpinner) {
      existingSpinner.fail(message);
    } else {
      console.error(message);
    }
    process.exit(1);
  }
}
/**
 * Run a single test file through the language runner and report its result.
 *
 * When no spinner is supplied, a verdict spinner is completed here and a
 * failing test terminates the process with exit code 1. When the caller owns
 * the spinner, only its text is updated so the spinner can keep running
 * across a batch of files.
 *
 * @param {object} testFile - Test file to run
 * @param {string} language - Repository language
 * @param {string} cwd - Current working directory
 * @param {string} customRunner - Custom test runner
 * @param {object} existingSpinner - Existing ora spinner to reuse (optional)
 * @returns {Promise<void>} Resolves once the result has been reported
 */
async function runSingleTest(testFile, language, cwd, customRunner, existingSpinner) {
  logger.debug(`Running single test file: ${testFile.relativePath}`);
  const runner = getTestRunner(language, customRunner);
  try {
    // Reuse the caller's spinner when one was handed in.
    const spinner = existingSpinner || ora(`Running ${testFile.relativePath}...`).start();
    spinner.text = `Running ${testFile.relativePath}...`;
    const result = await runner.runFile(testFile, cwd);
    const hasOutput = Boolean(result.stdout);
    if (hasOutput) {
      // Park the spinner so the raw test output stays readable.
      spinner.stopAndPersist({
        symbol: '📋',
        text: chalk.dim(`${testFile.relativePath} output:`)
      });
      console.log(chalk.dim('─'.repeat(80)));
      console.log(result.stdout);
      console.log(chalk.dim('─'.repeat(80)));
    }
    if (existingSpinner) {
      // Caller-owned spinner: only reflect the verdict in its text so the
      // batch loop can continue with the same spinner.
      spinner.text = result.failed
        ? chalk.red(`Test failed: ${testFile.relativePath}`)
        : chalk.green(`Test passed: ${testFile.relativePath}`);
    } else {
      // Stand-alone run: complete a spinner with the verdict; after parking
      // the original spinner for output, a fresh one shows the status.
      const statusSpinner = hasOutput ? ora().start() : spinner;
      if (result.failed) {
        statusSpinner.fail(chalk.red(`Test failed: ${testFile.relativePath}`));
        process.exit(1);
      } else {
        statusSpinner.succeed(chalk.green(`Test passed: ${testFile.relativePath}`));
      }
    }
  } catch (error) {
    if (existingSpinner) {
      existingSpinner.fail(chalk.red(`Error running test ${testFile.relativePath}: ${error.message}`));
    } else {
      console.error(chalk.red(`Error running test: ${error.message}`));
    }
    process.exit(1);
  }
}
/**
 * Resolve the test runner implementation for a repository language.
 * Unknown languages fall back to the Node.js runner.
 *
 * @param {string} language - Repository language
 * @param {string} customRunner - Custom test runner
 * @returns {object} - Test runner
 */
function getTestRunner(language, customRunner) {
  logger.debug(`Getting test runner for language: ${language}, custom runner: ${customRunner || 'none'}`);
  // Dispatch table mapping languages to runner factories. Java and Rust go
  // through the generic CLI runner (Maven/Gradle and cargo respectively).
  const factories = new Map([
    ['python', () => getPythonRunner(customRunner)],
    ['ruby', () => getRubyRunner(customRunner)],
    ['go', () => getGoRunner(customRunner)],
    ['java', () => getGenericRunner('java', customRunner)],
    ['rust', () => getGenericRunner('rust', customRunner)],
    ['node', () => getNodeRunner(customRunner)],
  ]);
  const factory = factories.get(language) ?? factories.get('node');
  return factory();
}
/**
 * Get a generic test runner for languages whose tests are invoked through a
 * simple CLI command (Java via Maven/Gradle, Rust via cargo).
 *
 * Fixes over the previous revision:
 * - The Gradle per-file command passed `--tests ClassName` as a single argv
 *   token; Gradle requires `--tests` and the pattern as separate arguments.
 * - The Java detector can return wrapper scripts ('./mvnw', './gradlew'),
 *   but the command table is keyed only by 'mvn'/'gradle'; wrapper runs
 *   previously crashed on an undefined lookup and always reported failure.
 *   Wrappers are now mapped onto their base tool's arguments and the
 *   wrapper script itself is executed.
 *
 * @param {string} language - Repository language ('java' or 'rust')
 * @param {string} customRunner - Custom test runner command (overrides detection)
 * @returns {object} - Runner exposing runAll(testFiles, cwd, markers) and runFile(testFile, cwd)
 */
function getGenericRunner(language, customRunner) {
  const runners = {
    java: {
      // Detects the build tool: Maven (pom.xml) or Gradle (build.gradle[.kts]).
      // On Windows, falls back to a project wrapper when the tool is not on PATH.
      detector: async (cwd) => {
        try {
          const pomPath = path.resolve(cwd, 'pom.xml');
          const gradlePath = path.resolve(cwd, 'build.gradle');
          const gradleKtsPath = path.resolve(cwd, 'build.gradle.kts');
          // Existence probe that never throws.
          const exists = (p) => fs.access(p).then(() => true).catch(() => false);
          if (await exists(pomPath)) {
            if (process.platform === 'win32') {
              try {
                // 'where' is the Windows equivalent of 'which'.
                await execa('where', ['mvn']);
                return 'mvn';
              } catch (e) {
                // Maven not on PATH: prefer the project's Maven wrapper.
                // NOTE(review): on Windows the wrapper file is mvnw.cmd;
                // spawning './mvnw' may need a shell — confirm on a Windows host.
                const hasMvnw = await exists(path.resolve(cwd, 'mvnw.cmd'));
                return hasMvnw ? './mvnw' : 'mvn';
              }
            }
            return 'mvn';
          }
          const hasGradle = await exists(gradlePath);
          const hasGradleKts = await exists(gradleKtsPath);
          if (hasGradle || hasGradleKts) {
            if (process.platform === 'win32') {
              try {
                await execa('where', ['gradle']);
                return 'gradle';
              } catch (e) {
                // Gradle not on PATH: prefer the project's Gradle wrapper.
                const hasGradlew = await exists(path.resolve(cwd, 'gradlew.bat'));
                return hasGradlew ? './gradlew' : 'gradle';
              }
            }
            return 'gradle';
          }
          return 'mvn'; // Default to Maven when no build file is found
        } catch (error) {
          return 'mvn';
        }
      },
      commands: {
        mvn: {
          all: ['mvn', 'test'],
          // Surefire's -Dtest filter takes the bare class name.
          file: (file) => ['mvn', 'test', `-Dtest=${path.basename(file.filePath, '.java')}`]
        },
        gradle: {
          all: ['gradle', 'test'],
          file: (file) => {
            const className = path.basename(file.filePath, '.java');
            // '--tests' and its pattern must be separate argv entries.
            return ['gradle', 'test', '--tests', className];
          }
        }
      }
    },
    rust: {
      detector: async () => 'cargo',
      commands: {
        cargo: {
          all: ['cargo', 'test'],
          file: (file) => {
            // cargo filters tests by substring; use the file's base name.
            const filePath = path.basename(file.filePath, '.rs');
            return ['cargo', 'test', '--', filePath];
          }
        }
      }
    }
  };
  const runner = runners[language];
  /**
   * Resolve a detected runner (possibly a wrapper script or custom command)
   * to a concrete argv array. Wrapper scripts reuse their base tool's
   * arguments but are themselves the executable.
   *
   * @param {string} detectedRunner - Runner name from the detector or customRunner
   * @param {function} pick - Selects the command (all/file) from a table entry
   * @returns {string[]} argv array: [executable, ...args]
   */
  const resolveCommand = (detectedRunner, pick) => {
    const base = detectedRunner.includes('mvnw')
      ? 'mvn'
      : detectedRunner.includes('gradlew')
        ? 'gradle'
        : detectedRunner;
    const entry = runner.commands[base];
    if (!entry) {
      // Unknown custom runner: surface a clear error (caught by callers below).
      throw new Error(`Unsupported test runner: ${detectedRunner}`);
    }
    const [executable, ...args] = pick(entry);
    return [base === detectedRunner ? executable : detectedRunner, ...args];
  };
  return {
    /**
     * Run the whole suite. Never rejects: errors are folded into the result.
     * @returns {Promise<{stdout: string, failed: boolean}>}
     */
    async runAll(testFiles, cwd, markers = []) {
      try {
        const detectedRunner = customRunner || await runner.detector(cwd);
        const [cmd, ...args] = resolveCommand(detectedRunner, (entry) => entry.all);
        const { stdout, stderr, exitCode } = await execa(cmd, args, {
          cwd,
          all: true,
          reject: false,
        });
        return {
          stdout: stdout || stderr,
          failed: exitCode !== 0,
        };
      } catch (error) {
        return {
          stdout: error.message,
          failed: true,
        };
      }
    },
    /**
     * Run a single test file. Never rejects: errors are folded into the result.
     * @returns {Promise<{stdout: string, failed: boolean}>}
     */
    async runFile(testFile, cwd) {
      try {
        const detectedRunner = customRunner || await runner.detector(cwd);
        const [cmd, ...args] = resolveCommand(detectedRunner, (entry) => entry.file(testFile));
        const { stdout, stderr, exitCode } = await execa(cmd, args, {
          cwd,
          all: true,
          reject: false,
        });
        return {
          stdout: stdout || stderr,
          failed: exitCode !== 0,
        };
      } catch (error) {
        return {
          stdout: error.message,
          failed: true,
        };
      }
    }
  };
}