/**
* @file src/index.js
* @author Ryan Rossiter, ryan@kingsds.network
* @date July 2020
*
* This is the main module for the Peter Test Framework.
* It exports the startPeter function which will use the provided
* options to execute tests using the appropriate test drivers.
*/
;
const fs = require('fs');
const path = require('path');
const chalk = require('chalk');
const dotenv = require('dotenv');
const { walkDirectorySync } = require('./utils/walk-directory');
const { enableDebugging } = require('./utils/debug');
const { getDriverList } = require('./drivers');
const Runner = require('./runner');
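/* Extensions and filename patterns (dotfiles, editor backups, swap files, etc.) that never denote runnable tests. */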
const IGNORED_TEST_EXTENSIONS = [ 'skip', 'stdin', 'js', 'ts', 'md', 'yml', 'txt', 'xml', 'incl' ];
const IGNORED_TEST_PATTERNS = [ /^\./, /~$/, /#$/, /\.sw[op]/ ];
/**
* @typedef {object} PeterOptions
* @property {string[]} testPaths Paths to files or directories to run tests from.
* @property {string} envFile Path to a dotenv file.
* @property {number} [concurrency=2] Number of tests to run concurrently; 0 means 2x the number of CPU cores.
* @property {boolean} [verbose=false] Enable verbose output.
* @property {boolean} [debug=false] Enable debugging information.
* @property {number} [repeat] Number of extra times to run the whole list of test files.
* @property {string} [junit] Path to write a JUnit XML report to.
* @property {string[]} [childArgv=[]] Remaining arguments to pass as argv to child processes (everything after -- on the CLI).
*/
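/* Example invocation (illustrative values, not defaults; the module path depends on your setup):
*   const peter = require('./src/index');
*   peter.main({ testPaths: ['tests/'], envFile: '.env', concurrency: 4, verbose: true, childArgv: [] });
*/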
/**
* @param {PeterOptions} options Options object, initially from yargs.
*/
exports.main = async function startPeter(options)
{
const { testPaths, envFile } = options;
const runnerOptions = Object.assign({}, options); /* shallow copy so test drivers cannot accidentally mutate the options controlling the program */
if (options.debug) {
enableDebugging();
}
validateOptions(options);
if (envFile)
dotenv.config({ path: path.resolve(process.cwd(), envFile) });
/* Die nicely, on the next pass of the event loop, giving the test
* drivers just enough warning to kill children.
*/
function die(reason)
{
process.emit('CLEANUP');
process.off('SIGTERM', die);
if (!process.exitCode)
process.exitCode = 98;
setImmediate(() => process.exit());
setTimeout(() => process.kill(process.pid, 9), 1000).unref(); /* should be unreached */
}
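/* Route fatal signals through die() so test drivers receive the CLEANUP event before the process exits. */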
process.on('SIGHUP', die);
process.on('SIGINT', die);
process.on('SIGTERM', die);
let testFiles = [];
for (let testPath of testPaths)
testFiles.push(...getTestFiles(testPath));
const repeatTestFiles = [];
for (let i=0; i < options.repeat; i++)
repeatTestFiles.push(...testFiles);
testFiles.push(...repeatTestFiles);
/* Randomize order of tests */
testFiles = testFiles.map(filename => ({ filename, sort: Math.random() }));
testFiles.sort((a, b) => a.sort - b.sort);
testFiles = testFiles.map(({ filename }) => filename);
if (testFiles.length === 0)
throw new Error('No test files found in any of: ' + testPaths.join(', '));
const runner = new Runner(testFiles, runnerOptions);
const results = await runner.start();
let testCount = 0;
let passCount = 0;
let expectedFailures = 0; /* failures anticipated from tests named *.failing */
let observedExpectedFailures = 0; /* anticipated failures that actually occurred */
let stderrArray = [];
const testcases = await Promise.all(
results.map(async ({ testFile, resolveResult }) => {
const knownFailingTest = testFile.endsWith('.failing');
let localTestCount = 0;
let localPassCount = 0;
const testResultData = await resolveResult()
.catch((err) => {
stderrArray.push([`In ${testFile}:`, err]);
/* Return the same { result } shape as a successful resolution so the destructuring below stays safe. */
return { result: false };
});
const { result } = testResultData;
let failures = [];
const testcaseId = testFile;
let testcaseName = path.basename(testFile);
const testcaseTime = 0;
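/* The test type is inferred from the filename: names containing ".tap" are treated as TAP tests and names containing ".simple" as simple tests. */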
const isTapTest = testcaseName.includes('.tap');
const isSimpleTest = testcaseName.includes('.simple');
if (typeof result === 'object')
{
/* tap test, or .simple test with rich test results */
let noTestRun = false;
if (result.plan && result.plan.start > result.plan.end && !knownFailingTest) /* No tests run, but zora marks this as okay. We want them to fail */
{
result.ok = false;
noTestRun = true;
}
// If we know the test is failing, don't count failures as such
if (knownFailingTest)
{
result.pass = result.count;
expectedFailures += Math.max(1, result.failures.length);
observedExpectedFailures += result.failures.length;
}
testCount += result.count;
passCount += result.pass;
localTestCount += result.count;
localPassCount += result.pass;
if ((!result.ok && !knownFailingTest) || noTestRun) {
if (options.verbose)
console.error(`\n${result.fail} failure(s) in ${testFile}:`);
console.group();
for (let failure of result.failures) {
const failureTitle = `${failure.name}${failure.diag ? ' at ' + failure.diag.at : ''}`;
const failureText = `Operator: ${failure.diag?.operator}
Expected: ${JSON.stringify(failure.diag?.expected)}
Actual: ${JSON.stringify(failure.diag?.actual)}`;
failures.push({
id: failure.id,
expected: knownFailingTest,
title: failureTitle,
text: failureText,
type: knownFailingTest ? 'WARNING' : 'ERROR',
failure,
});
if (options.verbose)
console.error(chalk.red(failure.name, 'at', failure.diag?.at));
console.group();
if (options.verbose)
{
console.error('Operator:', failure.diag?.operator);
console.error('Expected:', failure.diag?.expected);
console.error('Actual:', failure.diag?.actual);
}
console.groupEnd();
}
console.groupEnd();
}
}
else
{
/* .bash or .simple test with boolean result */
if (knownFailingTest)
expectedFailures++;
testCount++;
localTestCount++;
if (result.ok || result === true) {
passCount++;
localPassCount++;
}
else
{
if (knownFailingTest)
{
observedExpectedFailures++;
// count the .failing test as a pass
passCount++;
localPassCount++;
}
else
{
failures.push({
id: 1,
title: 'failure',
text: 'test failed',
type: testFile.endsWith('failing') ? 'WARNING' : 'ERROR',
failure: '',
});
}
}
}
const junitFailures = observedExpectedFailures ? failures.filter(failure => !failure.expected) : failures; /* don't tell jUnit about failures that were expected */
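/* Build the JUnit XML fragment for this test file: the file becomes a <testsuite> holding one
* <testcase> per plan entry (TAP/zora results) or a single <testcase> (boolean .bash/.simple
* results), and failures become nested <failure> elements. Rough shape (illustrative):
*   <testsuite id="..." name="..." tests="3" failures="1">
*     <testcase id="...:1" name="..." classname="..." time="0">
*       <failure message="..." type="ERROR">escaped details</failure>
*     </testcase>
*   </testsuite>
*/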
let testcases = '';
let xmlName = encodeXMLString(testcaseName);
if (result.plan)
{
if (result.plan.start > result.plan.end && !knownFailingTest) /* Occurs when no zora tests were run in testfile. There will be one failure */
{
result.plan.end = 1;
junitFailures.push(failures[0]);
}
for (let t = result.plan.start ; t <= result.plan.end; t++)
{
if (!t) t = 1; // result.plan.start can be null for tap test errors
const failure = junitFailures.find(ff => ff.id === t);
const testcase = result.failures?.find(ff => ff.id === t) || result.passes?.find(pp => pp.id === t);
if (isTapTest)
xmlName = `${testcaseId}:${t}-${encodeXMLString(testcaseName)}`
if (knownFailingTest && !testcase?.ok)
testcaseName += ' (FAILED AS EXPECTED)';
if (testcase?.skip)
testcaseName += ' (SKIPPED)';
testcases += `
<testcase id="${encodeXMLString(testcaseId + ':' + t)}" name="${xmlName}" classname="${encodeXMLString(testcaseName)}" time="${testcaseTime}">`;
if (failure && !testcase?.ok)
testcases += `
<failure message="${encodeXMLString(`${testcaseName}${testcase?.diag ? ' at ' + testcase.diag.at : ''}`)}" type="${encodeXMLString(failure.type)}">
${encodeXMLString(failure.text)}
${testcase?.stderr ? (encodeXMLString('stderr: ' + JSON.stringify(testcase.stderr, null, 2))) : ''}</failure>`;
testcases += `
</testcase>`;
}
}
else
{
testcases = `
<testcase id="${encodeXMLString(testcaseId)}" name="${encodeXMLString(testcaseName)}" time="${testcaseTime}">${
junitFailures.map(f => {
return `
<failure message="${encodeXMLString(f.title)}" type="${encodeXMLString(f.type)}">${encodeXMLString(f.text)}
${f.failure.stderr ? (encodeXMLString('stderr: ' + JSON.stringify(f.failure.stderr, null, 2))) : ''}</failure>`
}).join('')
}
</testcase>`;
}
const testsuite = `
<testsuite id="${encodeXMLString(testcaseId)}" name="${encodeXMLString(testcaseName)}" tests="${localTestCount}" failures="${localTestCount - localPassCount}">${testcases}
</testsuite>`
return testsuite;
})
);
console.log(`\nDone. ${passCount}/${testCount} tests passed; ${observedExpectedFailures}/${expectedFailures} expected failures`);
for (const [context, err] of stderrArray)
console.log(chalk.yellow(context), err);
if (options.junit)
{
const xml = `<?xml version="1.0" encoding="utf-8" ?>
<testsuites>${testcases.join('')}
</testsuites>\n`;
console.log(`Writing JUnit output to ${options.junit}`);
fs.writeFileSync(options.junit, xml, 'utf8');
}
if (passCount < testCount)
process.exitCode = 1;
}
/**
* Validate options.
* @param {PeterOptions} options
* @throws {Error} When a required option has the wrong type.
*/
function validateOptions(options)
{
if (!Array.isArray(options.testPaths)) {
throw new Error(`Option 'testPaths' should be an array and not ${typeof options.testPaths}.`);
}
if (typeof options.envFile !== 'string') {
throw new Error(`Option 'envFile' should be a string and not ${typeof options.envFile}.`);
}
}
/**
* @param {string} testPath
* @returns {string[]}
*/
function getTestFiles(testPath)
{
var testFiles;
const testPathStats = fs.existsSync(testPath) && fs.statSync(testPath);
if (testPathStats && testPathStats.isFile()) {
testFiles = [testPath];
} else if (testPathStats && testPathStats.isDirectory()) {
testFiles = walkDirectorySync(testPath);
} else {
console.error(`peter: ${chalk.yellow(testPath)}: No such file or directory`);
process.exit(2);
}
const driverList = getDriverList();
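/* Keep only files that a registered driver can run; e.g. a hypothetical "smoke.bash" needs a
* 'bash' driver, and a trailing ".failing" suffix is stripped before the extension is checked. */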
return testFiles.filter((filename) => {
if (filename.endsWith('.failing'))
filename = filename.slice(0, -8);
const ext = path.extname(filename).slice(1);
/* Ignore known non-test extensions and anything under a "lib" directory; lib directories hold test helper files whose extensions Peter may not recognize. */
if (IGNORED_TEST_EXTENSIONS.includes(ext) || filename.includes('/lib/')) {
return false;
}
for (let re of IGNORED_TEST_PATTERNS) {
if (path.basename(filename).match(re))
return false;
}
if (!driverList.includes(ext)) {
throw new Error(`Missing driver '${ext}' for test file ${filename}.`);
}
return true;
});
}
/** A simple XMLEscape function to encode the most sketchy characters
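*
* Example (illustrative):
*   encodeXMLString(`a < "b" & 'c'`) returns 'a &lt; &quot;b&quot; &amp; &apos;c&apos;'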
*/
function encodeXMLString(src) {
return String(src).replace(/&/g, '&amp;')
.replace(/</g, '&lt;')
.replace(/>/g, '&gt;')
.replace(/"/g, '&quot;')
.replace(/'/g, '&apos;');
}