hardhat

Hardhat is an extensible developer tool that helps smart contract developers increase productivity by reliably bringing together the tools they want.

import type { HookManager } from "../../../types/hooks.js";
import type {
  NewTaskActionFunction,
  Task,
  TaskArguments,
} from "../../../types/tasks.js";

import {
  assertHardhatInvariant,
  HardhatError,
} from "@nomicfoundation/hardhat-errors";
import chalk, { type ChalkInstance } from "chalk";

import { HardhatRuntimeEnvironmentImplementation } from "../../core/hre.js";

interface TestActionArguments {
  testFiles: string[];
  chainType: string;
  grep: string | undefined;
  noCompile: boolean;
  verbosity: number;
}

const runAllTests: NewTaskActionFunction<TestActionArguments> = async (
  { testFiles, chainType, grep, noCompile, verbosity },
  hre,
) => {
  // If this code is executed, it means the user has not specified a test runner.
  // If file paths are specified, we need to determine which test runner applies to each test file.
  // If no file paths are specified, each test runner will execute all tests located under its configured path in the Hardhat configuration.
  const subtasksToFiles =
    testFiles.length !== 0
      ? await registerTestRunnersForFiles(testFiles, hre.hooks)
      : {};

  const thisTask = hre.tasks.getTask("test");

  if (!noCompile) {
    await hre.tasks.getTask("build").run({
      noTests: true,
    });
  }

  if (hre.globalOptions.coverage === true) {
    assertHardhatInvariant(
      hre instanceof HardhatRuntimeEnvironmentImplementation,
      "Expected HRE to be an instance of HardhatRuntimeEnvironmentImplementation",
    );

    hre._coverage.disableReport();
  }

  const testSummaries: Record<
    string,
    {
      failed: number;
      passed: number;
      skipped: number;
      todo: number;
      failureOutput: string;
    }
  > = {};

  let failureIndex = 1;
  for (const subtask of thisTask.subtasks.values()) {
    const files = getTestFilesForSubtask(subtask, testFiles, subtasksToFiles);

    if (files === undefined) {
      // This scenario occurs when `testFiles` are provided,
      // but none are assigned to the current subtask, so it should be skipped
      continue;
    }

    const args: TaskArguments = {
      testFiles: files,
      grep,
      noCompile: subtask.options.has("noCompile"),
    };

    if (subtask.options.has("chainType")) {
      args.chainType = chainType;
    }

    if (subtask.options.has("verbosity")) {
      args.verbosity = verbosity;
    }

    if (subtask.options.has("testSummaryIndex")) {
      args.testSummaryIndex = failureIndex;
      const summaryId = subtask.id[subtask.id.length - 1];
      testSummaries[summaryId] = await subtask.run(args);
      failureIndex += testSummaries[summaryId].failed ?? 0;
    } else {
      await subtask.run(args);
    }
  }

  const passed: Array<[string, number]> = [];
  const failed: Array<[string, number]> = [];
  const skipped: Array<[string, number]> = [];
  const todo: Array<[string, number]> = [];
  const outputLines: string[] = [];

  for (const [subtaskName, results] of Object.entries(testSummaries)) {
    if (results.passed > 0) {
      passed.push([subtaskName, results.passed]);
    }
    if (results.failed > 0) {
      failed.push([subtaskName, results.failed]);
    }
    if (results.skipped > 0) {
      skipped.push([subtaskName, results.skipped]);
    }
    if (results.todo > 0) {
      todo.push([subtaskName, results.todo]);
    }

    /**
     * Failure formatting test cases:
     *
     * no failures
     * only node
     * multiple node
     * only solidity
     *   - 1 top
     *   - 2 bottom but sometimes - 1 instead??? how many do we need bottom
     * multiple solidity
     * single node + single solidity
     * multiple node + multiple solidity
     * single node + multiple solidity
     * multiple node + single solidity
     */
    if (results.failureOutput !== "") {
      const output = results.failureOutput;
      if (subtaskName.includes("node")) {
        outputLines.push(`\n${output}\n`);
      } else {
        outputLines.push(output);
      }
    }
  }

  if (passed.length > 0) {
    logSummaryLine("passing", passed, chalk.green);
  }
  if (failed.length > 0) {
    logSummaryLine("failing", failed, chalk.red);
  }
  if (skipped.length > 0) {
    logSummaryLine("skipped", skipped, chalk.cyan);
  }
  if (todo.length > 0) {
    logSummaryLine("todo", todo, chalk.blue);
  }

  if (outputLines.length > 0) {
    console.log(
      outputLines
        .map((o) => {
          const nl = o.match(/\n+$/gm);
          if (nl !== null) {
            return o.replace(new RegExp(`${nl[0]}$`), "\n");
          }
          return o;
        })
        .join("\n"),
    );
  }

  console.log();

  if (hre.globalOptions.coverage === true) {
    assertHardhatInvariant(
      hre instanceof HardhatRuntimeEnvironmentImplementation,
      "Expected HRE to be an instance of HardhatRuntimeEnvironmentImplementation",
    );

    const ids = Array.from(thisTask.subtasks.keys());
    hre._coverage.enableReport();
    await hre._coverage.report(...ids);
    console.log();
  }

  if (process.exitCode !== undefined && process.exitCode !== 0) {
    console.error("Test run failed");
  }
};
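// Illustrative note (not part of the original file): with hypothetical
// "nodejs" and "solidity" runners contributing summaries above, the lines
// printed by logSummaryLine below would look like:
//
//   3 passing (2 nodejs, 1 solidity)
//   1 failing (1 solidity)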
function logSummaryLine(
  label: string,
  items: Array<[string, number]>,
  color: ChalkInstance = chalk.white,
): void {
  let total = 0;
  const str = items
    .map(([name, count]) => {
      total += count;
      return `${count} ${name}`;
    })
    .join(", ");

  console.log(`${color(`${total} ${label}`)} (${str})`);
}

async function registerTestRunnersForFiles(
  testFiles: string[],
  hooks: HookManager,
): Promise<Record<string, string[]>> {
  const subtasksToFiles: Record<string, string[]> = {};
  const notFound: string[] = [];

  for (const file of testFiles) {
    const subtaskName = await hooks.runHandlerChain(
      "test",
      "registerFileForTestRunner",
      [file],
      async (_file) => undefined,
    );

    if (subtaskName === undefined) {
      notFound.push(file);
      continue;
    }

    if (subtasksToFiles[subtaskName] === undefined) {
      subtasksToFiles[subtaskName] = [];
    }

    subtasksToFiles[subtaskName].push(file);
  }

  if (notFound.length !== 0) {
    throw new HardhatError(
      HardhatError.ERRORS.CORE.TEST_PLUGIN.CANNOT_DETERMINE_TEST_RUNNER,
      {
        files: notFound.join(", "),
      },
    );
  }

  return subtasksToFiles;
}

function getTestFilesForSubtask(
  subtask: Task,
  testFiles: string[],
  subtaskToFiles: Record<string, string[]>,
): string[] | undefined {
  if (testFiles.length === 0) {
    return [];
  }

  // subtask.id is an array like ['test', '<pluginName>', …];
  // index 1 holds the specific plugin's subtask name (e.g. 'node')
  const pluginName = subtask.id[1];
  return subtaskToFiles[pluginName];
}

export default runAllTests;
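The per-file dispatch in registerTestRunnersForFiles relies on a chain of hook handlers: each registered test runner either claims a file by returning its subtask name or defers to the next handler, and the default implementation returns undefined so unclaimed files can be collected and reported. The standalone sketch below mimics that pattern; it is an illustration only, with assumed runner names ("solidity", "nodejs") and file-name conventions, not Hardhat's actual hook implementation.

// chain-sketch.ts — minimal, self-contained illustration of the
// handler-chain pattern used by runHandlerChain above.
type FileHandler = (
  file: string,
  next: (file: string) => Promise<string | undefined>,
) => Promise<string | undefined>;

async function runChain(
  handlers: FileHandler[],
  file: string,
  defaultImpl: (file: string) => Promise<string | undefined>,
): Promise<string | undefined> {
  // Compose the chain back to front so handlers[0] runs first and can defer
  // to handlers[1] via `next`, bottoming out at the default implementation.
  let next = defaultImpl;
  for (const handler of [...handlers].reverse()) {
    const inner = next;
    next = async (f) => handler(f, inner);
  }
  return next(file);
}

// Two hypothetical runners: one claims Solidity test files, one claims JS/TS.
const solidityRunner: FileHandler = async (file, next) =>
  file.endsWith(".t.sol") ? "solidity" : next(file);

const nodeRunner: FileHandler = async (file, next) =>
  /\.(test|spec)\.(js|ts)$/.test(file) ? "nodejs" : next(file);

async function demo(): Promise<void> {
  // "Counter.t.sol" -> "solidity", "math.test.ts" -> "nodejs",
  // "notes.txt" -> undefined (runAllTests would report such a file via
  // CANNOT_DETERMINE_TEST_RUNNER).
  for (const file of ["Counter.t.sol", "math.test.ts", "notes.txt"]) {
    const runner = await runChain(
      [solidityRunner, nodeRunner],
      file,
      async () => undefined,
    );
    console.log(file, "->", runner);
  }
}

void demo();

Because the default implementation resolves to undefined rather than throwing, the caller decides how to handle unclaimed files; here, as in registerTestRunnersForFiles, they are gathered and reported together instead of failing one at a time.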