@wdio/cli
import exitHook from 'async-exit-hook';
import logger from '@wdio/logger';
import { validateConfig } from '@wdio/config';
import { ConfigParser } from '@wdio/config/node';
import { initializePlugin, initializeLauncherService, sleep, enableFileLogging } from '@wdio/utils';
import { setupDriver, setupBrowser } from '@wdio/utils/node';
import CLInterface from './interface.js';
import { runLauncherHook, runOnCompleteHook, runServiceHook } from './utils.js';
import { TESTRUNNER_DEFAULTS, WORKER_GROUPLOGS_MESSAGES } from './constants.js';
const log = logger('@wdio/cli:launcher');
class Launcher {
    _configFilePath;
    _args;
    _isWatchMode;
    configParser;
    isMultiremote = false;
    isParallelMultiremote = false;
    runner;
    interface;
    _exitCode = 0;
    _hasTriggeredExitRoutine = false;
    _schedule = [];
    _rid = [];
    _runnerStarted = 0;
    _runnerFailed = 0;
    _launcher;
    _resolve;
    constructor(_configFilePath, _args = {}, _isWatchMode = false) {
        this._configFilePath = _configFilePath;
        this._args = _args;
        this._isWatchMode = _isWatchMode;
        this.configParser = new ConfigParser(this._configFilePath, this._args);
    }
    /**
     * run sequence
     * @return {Promise} that only gets resolved with either an exitCode or an error
     */
    async run() {
        await this.configParser.initialize(this._args);
        const config = this.configParser.getConfig();
        /**
         * assign parsed autocompile options into args so it can be used within the worker
         * without having to read the config again
         */
        this._args.autoCompileOpts = config.autoCompileOpts;
        const capabilities = this.configParser.getCapabilities();
        this.isParallelMultiremote = Array.isArray(capabilities) &&
            capabilities.every(cap => Object.values(cap).length > 0 &&
                Object.values(cap).every(c => typeof c === 'object' && c.capabilities));
        this.isMultiremote = this.isParallelMultiremote || !Array.isArray(capabilities);
        validateConfig(TESTRUNNER_DEFAULTS, { ...config, capabilities });
        await enableFileLogging(config.outputDir);
        logger.setLogLevelsConfig(config.logLevels, config.logLevel);
        /**
         * For Parallel-Multiremote, only get the specs and excludes from the first object
         */
        const totalWorkerCnt = Array.isArray(capabilities)
            ? capabilities
                .map((c) => {
                    if (this.isParallelMultiremote) {
                        const keys = Object.keys(c);
                        return this.configParser.getSpecs(c[keys[0]].capabilities.specs, c[keys[0]].capabilities.exclude).length;
                    }
                    return this.configParser.getSpecs(c.specs, c.exclude).length;
                })
                .reduce((a, b) => a + b, 0)
            : 1;
        this.interface = new CLInterface(config, totalWorkerCnt, this._isWatchMode);
        config.runnerEnv.FORCE_COLOR = Number(this.interface.hasAnsiSupport);
        const [runnerName, runnerOptions] = Array.isArray(config.runner)
            ? config.runner
            : [config.runner, {}];
        const Runner = (await initializePlugin(runnerName, 'runner')).default;
        this.runner = new Runner(runnerOptions, config);
        /**
         * catches ctrl+c event
         */
        exitHook(this._exitHandler.bind(this));
        let exitCode = 0;
        let error = undefined;
        try {
            const caps = this.configParser.getCapabilities();
            const { ignoredWorkerServices, launcherServices } = await initializeLauncherService(config, caps);
            this._launcher = launcherServices;
            this._args.ignoredWorkerServices = ignoredWorkerServices;
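            // For reference: a launcher service is any object exposing launcher-side
            // hooks such as onPrepare/onComplete, which the hook runners below invoke.
            // A minimal sketch (MyLauncherService is hypothetical, not part of this file):
            //
            //   class MyLauncherService {
            //       async onPrepare (config, capabilities) { /* start shared resources */ }
            //       async onComplete (exitCode, config, capabilities) { /* tear them down */ }
            //   }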
            /**
             * run pre test tasks for runner plugins
             * (e.g. deploy Lambda function to AWS)
             */
            await this.runner.initialize();
            /**
             * run onPrepare hook
             */
            log.info('Run onPrepare hook');
            await runLauncherHook(config.onPrepare, config, caps);
            await runServiceHook(this._launcher, 'onPrepare', config, caps);
            /**
             * pre-configure necessary driver for worker threads
             */
            await Promise.all([
                setupDriver(config, caps),
                setupBrowser(config, caps)
            ]);
            exitCode = await this._runMode(config, caps);
            /**
             * run onComplete hook
             * Even if it fails we still want to see the results and end the logger stream.
             * Also ensure that user hooks run before service hooks, so that plugin
             * services (e.g. the shared store service) are still available to the
             * user hooks when running them in this order.
             */
            log.info('Run onComplete hook');
            const onCompleteResults = await runOnCompleteHook(config.onComplete, config, caps, exitCode, this.interface.result);
            await runServiceHook(this._launcher, 'onComplete', exitCode, config, caps);
            // if any of the onComplete hooks failed, update the exit code
            exitCode = onCompleteResults.includes(1) ? 1 : exitCode;
            await logger.waitForBuffer();
            this.interface.finalise();
        }
        catch (err) {
            error = err;
        }
        finally {
            if (!this._hasTriggeredExitRoutine) {
                this._hasTriggeredExitRoutine = true;
                const passesCodeCoverage = await this.runner.shutdown();
                if (!passesCodeCoverage) {
                    exitCode = exitCode || 1;
                }
            }
        }
        if (error) {
            this.interface.logHookError(error);
            throw error;
        }
        return exitCode;
    }
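    // For reference, the documented programmatic entry point drives this class
    // directly; a sketch (paths and options illustrative):
    //
    //   import Launcher from '@wdio/cli'
    //   const wdio = new Launcher('/path/to/wdio.conf.js', { spec: ['./foo.e2e.js'] })
    //   wdio.run().then(
    //       (exitCode) => process.exit(exitCode),
    //       (error) => { console.error('Launcher failed to start', error.stacktrace); process.exit(1) }
    //   )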
    /**
     * run without triggering onPrepare/onComplete hooks
     */
    _runMode(config, caps) {
        /**
         * fail if no caps were found
         */
        if (!caps) {
            return new Promise((resolve) => {
                log.error('Missing capabilities, exiting with failure');
                return resolve(1);
            });
        }
        /**
         * avoid retries in watch mode
         */
        const specFileRetries = this._isWatchMode ? 0 : config.specFileRetries;
        /**
         * schedule test runs
         */
        let cid = 0;
        if (this.isMultiremote && !this.isParallelMultiremote) {
            /**
             * Multiremote mode
             */
            this._schedule.push({
                cid: cid++,
                caps: caps,
                specs: this._formatSpecs(caps, specFileRetries),
                availableInstances: config.maxInstances || 1,
                runningInstances: 0
            });
        }
        else {
            /**
             * Regular mode & Parallel Multiremote
             */
            for (const capabilities of caps) {
                /**
                 * when using browser runner we only allow one session per browser
                 */
                const availableInstances = this.isParallelMultiremote
                    ? config.maxInstances || 1
                    : config.runner === 'browser'
                        ? 1
                        : capabilities.maxInstances || capabilities['wdio:maxInstances'] || config.maxInstancesPerCapability;
                this._schedule.push({
                    cid: cid++,
                    caps: capabilities,
                    specs: this._formatSpecs(capabilities, specFileRetries),
                    availableInstances,
                    runningInstances: 0
                });
            }
        }
        return new Promise((resolve) => {
            this._resolve = resolve;
            /**
             * fail if no specs were found or specified
             */
            if (Object.values(this._schedule).reduce((specCnt, schedule) => specCnt + schedule.specs.length, 0) === 0) {
                const { total, current } = config.shard;
                if (total > 1) {
                    log.info(`No specs to execute in shard ${current}/${total}, exiting!`);
                    return resolve(0);
                }
                log.error('No specs found to run, exiting with failure');
                return resolve(1);
            }
            /**
             * resolve immediately if no spec needs to be run
             */
            if (this._runSpecs()) {
                resolve(0);
            }
        });
    }
    /**
     * Format the specs into an array of objects with files and retries
     */
    _formatSpecs(capabilities, specFileRetries) {
        let caps;
        if ('alwaysMatch' in capabilities) {
            caps = capabilities.alwaysMatch;
        }
        else if (typeof Object.keys(capabilities)[0] === 'object' && 'capabilities' in capabilities[Object.keys(capabilities)[0]]) {
            caps = {};
        }
        else {
            caps = capabilities;
        }
        const specs = caps.specs || caps['wdio:specs'];
        const excludes = caps.exclude || caps['wdio:exclude'];
        const files = this.configParser.getSpecs(specs, excludes);
        return files.map((file) => {
            if (typeof file === 'string') {
                return { files: [file], retries: specFileRetries };
            }
            else if (Array.isArray(file)) {
                return { files: file, retries: specFileRetries };
            }
            log.warn('Unexpected entry in specs that is neither string nor array: ', file);
            // Returning an empty structure to avoid undefined
            return { files: [], retries: specFileRetries };
        });
    }
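    // _formatSpecs output, for reference: if the config resolves specs to
    // ['./a.e2e.js', ['./b.e2e.js', './c.e2e.js']] (grouped specs share one
    // worker) and specFileRetries is 1, the method returns (illustrative paths):
    //
    //   [{ files: ['./a.e2e.js'], retries: 1 },
    //    { files: ['./b.e2e.js', './c.e2e.js'], retries: 1 }]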
    /**
     * run multiple single remote tests
     * @return {Boolean} true if all specs have been run and all instances have finished
     */
    _runSpecs() {
        /**
         * stop spawning new processes when CTRL+C was triggered
         */
        if (this._hasTriggeredExitRoutine) {
            return true;
        }
        const config = this.configParser.getConfig();
        while (this._getNumberOfRunningInstances() < config.maxInstances) {
            const schedulableCaps = this._schedule
                /**
                 * bail if number of errors exceeds allowed
                 */
                .filter(() => {
                    const filter = typeof config.bail !== 'number' ||
                        config.bail < 1 ||
                        config.bail > this._runnerFailed;
                    /**
                     * clear number of specs when filter is false
                     */
                    if (!filter) {
                        this._schedule.forEach((t) => { t.specs = []; });
                    }
                    return filter;
                })
                /**
                 * make sure the total number of running instances does not exceed the general maxInstances
                 */
                .filter(() => this._getNumberOfRunningInstances() < config.maxInstances)
                /**
                 * make sure the capability has available capacity
                 */
                .filter((a) => a.availableInstances > 0)
                /**
                 * make sure the capability still has specs to run
                 */
                .filter((a) => a.specs.length > 0)
                /**
                 * make sure we are running caps with fewer running instances first
                 */
                .sort((a, b) => a.runningInstances - b.runningInstances);
            /**
             * stop if no capability was schedulable
             */
            if (schedulableCaps.length === 0) {
                break;
            }
            const specs = schedulableCaps[0].specs.shift();
            this._startInstance(specs.files, schedulableCaps[0].caps, schedulableCaps[0].cid, specs.rid, specs.retries);
            schedulableCaps[0].availableInstances--;
            schedulableCaps[0].runningInstances++;
        }
        return this._getNumberOfRunningInstances() === 0 && this._getNumberOfSpecsLeft() === 0;
    }
    /**
     * gets number of all running instances
     * @return {number} number of running instances
     */
    _getNumberOfRunningInstances() {
        return this._schedule.map((a) => a.runningInstances).reduce((a, b) => a + b);
    }
    /**
     * get number of total specs left to complete whole suites
     * @return {number} specs left to complete suite
     */
    _getNumberOfSpecsLeft() {
        return this._schedule.map((a) => a.specs.length).reduce((a, b) => a + b);
    }
    /**
     * Start instance in a child process.
     * @param {Array} specs Specs to run
     * @param {object} caps Capability to run the specs with
     * @param {number} cid Capabilities ID
     * @param {string} rid Runner ID override
     * @param {number} retries Number of retries remaining
     */
    async _startInstance(specs, caps, cid, rid, retries) {
        if (!this.runner || !this.interface) {
            throw new Error('Internal Error: no runner initialized, call run() first');
        }
        const config = this.configParser.getConfig();
        // wait before retrying the spec file
        if (typeof config.specFileRetriesDelay === 'number' &&
            config.specFileRetries > 0 &&
            config.specFileRetries !== retries) {
            await sleep(config.specFileRetriesDelay * 1000);
        }
        // Retried tests receive the cid of the failing test as rid
        // so they can run with the same cid of the failing test.
        const runnerId = rid || this._getRunnerId(cid);
        const processNumber = this._runnerStarted + 1;
        // process.debugPort defaults to 5858 and is set even when process
        // is not being debugged.
        const debugArgs = [];
        let debugType;
        let debugHost = '';
        const debugPort = process.debugPort;
        for (const i in process.execArgv) {
            const debugArgs = process.execArgv[i].match('--(debug|inspect)(?:-brk)?(?:=(.*):)?');
            if (debugArgs) {
                const [, type, host] = debugArgs;
                if (type) {
                    debugType = type;
                }
                if (host) {
                    debugHost = `${host}:`;
                }
            }
        }
        if (debugType) {
            debugArgs.push(`--${debugType}=${debugHost}${(debugPort + processNumber)}`);
        }
        // if you would like to add --debug-brk, use a different port, etc...
        const capExecArgs = [...(config.execArgv || [])];
        // The default value for child.fork execArgs is process.execArgs,
        // so continue to use this unless another value is specified in config.
        const defaultArgs = (capExecArgs.length) ? process.execArgv : [];
        // If an arg appears multiple times the last occurrence is used
        const execArgv = [...defaultArgs, ...debugArgs, ...capExecArgs];
        // bump up worker count
        this._runnerStarted++;
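        // Debug-port assignment, for reference: the inner `const debugArgs`
        // match result above shadows the outer array while scanning
        // process.execArgv; only the outer array is forwarded to the worker.
        // If the launcher itself was started with e.g.
        // `node --inspect=127.0.0.1:9229`, the second worker would be forked
        // with `--inspect=127.0.0.1:9231` (debugPort + processNumber), giving
        // every worker its own debugger port. (Illustrative values.)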
        // run worker hook to allow modifying the runtime and capabilities of a specific worker
        log.info('Run onWorkerStart hook');
        await runLauncherHook(config.onWorkerStart, runnerId, caps, specs, this._args, execArgv)
            .catch((error) => this._workerHookError(error));
        await runServiceHook(this._launcher, 'onWorkerStart', runnerId, caps, specs, this._args, execArgv)
            .catch((error) => this._workerHookError(error));
        // prefer launcher settings in capabilities over general launcher settings
        const worker = await this.runner.run({
            cid: runnerId,
            command: 'run',
            configFile: this._configFilePath,
            args: {
                ...this._args,
                ...(config?.autoCompileOpts
                    ? { autoCompileOpts: config.autoCompileOpts }
                    : {}),
                /**
                 * Pass on user and key values to ensure they are available in the worker process when using
                 * environment variables that were locally exported but are not part of the environment.
                 */
                user: config.user,
                key: config.key
            },
            caps,
            specs,
            execArgv,
            retries
        });
        worker.on('message', this.interface.onMessage.bind(this.interface));
        worker.on('error', this.interface.onMessage.bind(this.interface));
        worker.on('exit', (code) => {
            if (!this.configParser.getConfig().groupLogsByTestSpec) {
                return;
            }
            if (code.exitCode === 0) {
                console.log(WORKER_GROUPLOGS_MESSAGES.normalExit(code.cid));
            }
            else {
                console.log(WORKER_GROUPLOGS_MESSAGES.exitWithError(code.cid));
            }
            worker.logsAggregator.forEach((logLine) => {
                console.log(logLine.replace(new RegExp('\\n$'), ''));
            });
        });
        worker.on('exit', this._endHandler.bind(this));
    }
    _workerHookError(error) {
        if (!this.interface) {
            throw new Error('Internal Error: no interface initialized, call run() first');
        }
        this.interface.logHookError(error);
        if (this._resolve) {
            this._resolve(1);
        }
    }
    /**
     * generates a runner id
     * @param {number} cid capability id (unique identifier for a capability)
     * @return {String} runner id (combination of cid and test id e.g. 0-0, 0-1, 1-0, 1-1 ...)
     */
    _getRunnerId(cid) {
        if (!this._rid[cid]) {
            this._rid[cid] = 0;
        }
        return `${cid}-${this._rid[cid]++}`;
    }
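    // Runner id sequence, for reference: the first three workers for
    // capability 0 receive ids '0-0', '0-1', '0-2'; the first worker for
    // capability 1 receives '1-0'. _endHandler parses the cid back out of
    // the rid with parseInt(rid, 10).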
    /**
     * Close test runner process once all child processes have exited
     * @param {number} cid Capabilities ID
     * @param {number} exitCode exit code of child process
     * @param {Array} specs Specs that were run
     * @param {number} retries Number of retries remaining
     */
    async _endHandler({ cid: rid, exitCode, specs, retries }) {
        const passed = this._isWatchModeHalted() || exitCode === 0;
        if (!passed && retries > 0) {
            // Default is true, so test for false explicitly
            const requeue = this.configParser.getConfig().specFileRetriesDeferred !== false ? 'push' : 'unshift';
            this._schedule[parseInt(rid, 10)].specs[requeue]({ files: specs, retries: retries - 1, rid });
        }
        else {
            this._exitCode = this._isWatchModeHalted() ? 0 : this._exitCode || exitCode;
            this._runnerFailed += !passed ? 1 : 0;
        }
        /**
         * avoid emitting job:end if watch mode has been stopped by user
         */
        if (!this._isWatchModeHalted() && this.interface) {
            this.interface.emit('job:end', { cid: rid, passed, retries });
        }
        /**
         * Update schedule now this process has ended
         * get cid (capability id) from rid (runner id)
         */
        const cid = parseInt(rid, 10);
        this._schedule[cid].availableInstances++;
        this._schedule[cid].runningInstances--;
        log.info('Run onWorkerEnd hook');
        const config = this.configParser.getConfig();
        await runLauncherHook(config.onWorkerEnd, rid, exitCode, specs, retries)
            .catch((error) => this._workerHookError(error));
        await runServiceHook(this._launcher, 'onWorkerEnd', rid, exitCode, specs, retries)
            .catch((error) => this._workerHookError(error));
        /**
         * do nothing if
         * - there are specs to be executed
         * - we are running watch mode
         */
        const shouldRunSpecs = this._runSpecs();
        const inWatchMode = this._isWatchMode && !this._hasTriggeredExitRoutine;
        if (!shouldRunSpecs || inWatchMode) {
            /**
             * print reporter results when in watch mode
             */
            if (inWatchMode) {
                this.interface?.finalise();
            }
            return;
        }
        if (this._resolve) {
            this._resolve(passed ? this._exitCode : 1);
        }
    }
    /**
     * We need exitHandler to catch SIGINT / SIGTERM events.
     * Make sure all started selenium sessions get closed properly and prevent
     * having dead driver processes. To do so let the runner end its Selenium
     * session first before killing the launcher process.
     */
    _exitHandler(callback) {
        if (!callback || !this.runner || !this.interface) {
            return;
        }
        if (this._hasTriggeredExitRoutine) {
            return callback(true);
        }
        this._hasTriggeredExitRoutine = true;
        this.interface.sigintTrigger();
        return this.runner.shutdown().then(callback);
    }
    /**
     * returns true if user stopped watch mode, e.g. with ctrl+c
     * @returns {boolean}
     */
    _isWatchModeHalted() {
        return this._isWatchMode && this._hasTriggeredExitRoutine;
    }
}
export default Launcher;
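// Spec-file retry flow, for reference (hypothetical config values):
//
//   specFileRetries: 2            // _endHandler requeues a failed spec up to twice
//   specFileRetriesDelay: 3       // _startInstance sleeps 3s before each retry
//   specFileRetriesDeferred: true // retries are pushed to the end of the queue
//
// A spec failing in worker '0-0' keeps its rid on requeue, so the retry runs
// under the same runner id after the other pending specs for that capability.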