@usebruno/cli
Version:
With Bruno CLI, you can now run your API collections with ease using simple command line commands.
1,153 lines (1,028 loc) • 41.9 kB
JavaScript
const fs = require('fs');
const chalk = require('chalk');
const path = require('path');
const { forOwn, cloneDeep } = require('lodash');
const { getRunnerSummary } = require('@usebruno/common/runner');
const { exists, parseCSV } = require('../utils/filesystem');
const { runSingleRequest } = require('../runner/run-single-request');
const { getEnvVars } = require('../utils/bru');
const { parseEnvironmentJson } = require('../utils/environment');
const { isRequestTagsIncluded } = require("@usebruno/common")
const makeJUnitOutput = require('../reporters/junit');
const makeHtmlOutput = require('../reporters/html');
const { getOptions } = require('../utils/bru');
const { parseDotEnv, parseEnvironment } = require('@usebruno/filestore');
const { getExternalSecretsData } = require('../utils/external-secrets');
const { sanitizeRunnerResults } = require('../utils/sanitize-results');
const { interpolateString } = require('../runner/interpolate-string');
const constants = require('../constants');
const { findItemInCollection, createCollectionJsonFromPathname, getCallStack } = require('../utils/collection');
const { hasExecutableTestInScript } = require('../utils/request');
// yargs command definition: `run` accepts zero or more request/folder paths.
const command = 'run [paths...]';
const desc = 'Run one or more requests/folders';
const Table = require('cli-table3');
/**
 * Computes a weighted-average success rate for one iteration.
 *
 * Each non-skipped request carries an equal share of 100%. A request's share
 * is scaled by the fraction of its checks (tests + assertions) that passed;
 * a request with no checks earns its full share unless it errored, in which
 * case it earns nothing.
 *
 * @param {{results?: Array}} iteration - iteration result holding per-request results
 * @returns {string} percentage with one decimal (e.g. "87.5%"), or "0%" when
 *                   there are no non-skipped requests to score
 */
const calculateSuccessRate = (iteration) => {
  const allResults = Array.isArray(iteration.results) ? iteration.results : [];

  // Skipped requests do not participate in the score at all.
  const scoredRequests = allResults.filter((res) => !res.skipped);
  if (scoredRequests.length === 0) {
    return '0%';
  }

  // Every scored request gets an identical slice of 100%.
  const sharePerRequest = 100 / scoredRequests.length;

  const contributionOf = (res) => {
    const testChecks = Array.isArray(res.testResults) ? res.testResults : [];
    const assertionChecks = Array.isArray(res.assertionResults) ? res.assertionResults : [];
    const checkCount = testChecks.length + assertionChecks.length;

    if (checkCount === 0) {
      // No checks at all: full credit unless the request itself errored.
      return res?.error ? 0 : sharePerRequest;
    }

    const passedCount =
      testChecks.filter((t) => t.status === 'pass').length +
      assertionChecks.filter((a) => a.status === 'pass').length;
    return (passedCount / checkCount) * sharePerRequest;
  };

  let achieved = 0;
  for (const res of scoredRequests) {
    achieved += contributionOf(res);
  }
  return `${achieved.toFixed(1)}%`;
};
// Renders the "Requests" table cell as "<total> (<non-zero breakdown>)",
// e.g. "10 (8 Passed, 1 Failed, 1 Skipped)". Errored requests are folded
// into the Failed count; zero-valued categories are omitted entirely.
function formatRequestsCellFromSummary(summary) {
  const totalCount = summary.totalRequests || 0;
  const passedCount = summary.passedRequests || 0;
  const failedCount = (summary.failedRequests || 0) + (summary.errorRequests || 0);
  const skippedCount = summary.skippedRequests || 0;

  const breakdown = [];
  if (passedCount > 0) {
    breakdown.push(chalk.green(`${passedCount} Passed`));
  }
  if (failedCount > 0) {
    breakdown.push(chalk.red(`${failedCount} Failed`));
  }
  if (skippedCount > 0) {
    breakdown.push(chalk.magenta(`${skippedCount} Skipped`));
  }

  if (breakdown.length === 0) {
    return `${totalCount}`;
  }
  return `${totalCount} (${breakdown.join(', ')})`;
}
// Builds a cli-table3 table (first column left-aligned, the rest centered)
// and prints it to stdout underneath a bold title.
function printGenericTable(headers, rows, title) {
  const colAligns = headers.map((_, columnIndex) =>
    columnIndex === 0 ? 'left' : 'center'
  );
  const table = new Table({
    head: headers,
    style: { head: [], border: [] },
    colAligns
  });
  for (const row of rows) {
    table.push(row);
  }
  console.log('\n' + chalk.bold(title));
  console.log(table.toString());
}
// Prints the "Execution Summary" table for a single (non-iteration) run:
// overall status, request/test/assertion tallies and total response time.
function printSingleRunTable(results) {
  const [firstIteration] = results;
  const { summary } = firstIteration;

  // Total duration = sum of every request's response time (ms).
  let durationMs = 0;
  for (const res of firstIteration.results) {
    durationMs += res.response?.responseTime || 0;
  }

  const anyFailure =
    summary.failedRequests > 0 ||
    summary.failedAssertions > 0 ||
    summary.failedTests > 0 ||
    (summary.errorRequests || 0) > 0;

  const statusCell = anyFailure
    ? chalk.red.bold('✗ FAIL')
    : chalk.green.bold('✓ PASS');

  const rows = [
    ['Status', statusCell],
    ['Requests', formatRequestsCellFromSummary(summary)],
    ['Tests', `${summary.passedTests}/${summary.totalTests}`],
    ['Assertions', `${summary.passedAssertions}/${summary.totalAssertions}`],
    ['Duration (ms)', durationMs],
  ];
  const headers = [chalk.bold('Metric'), chalk.bold('Result')];
  printGenericTable(headers, rows, '📊 Execution Summary');
}
// Prints a summary table for iteration mode results.
// One row is emitted per iteration (status, request/test/assertion tallies,
// duration) followed by a bold TOTAL row aggregated across all iterations.
function printIterationTable(results, isParallelRun) {
  const headers = [
    chalk.bold('Iteration'),
    chalk.bold('Status'),
    chalk.bold('Requests'),
    chalk.bold('Tests'),
    chalk.bold('Assertions'),
    chalk.bold('Duration (ms)'),
  ];
  // Running totals across iterations; `allPassed` flips to false on the first
  // iteration containing any failed/errored request, test or assertion.
  const aggregateTotals = {
    requests: { total: 0, passed: 0, failed: 0, skipped: 0 },
    tests: { total: 0, passed: 0, failed: 0 },
    assertions: { total: 0, passed: 0, failed: 0 },
    duration: 0,
    allPassed: true,
  };
  const rows = [];
  results.forEach((iteration, idx) => {
    const summary = iteration.summary;
    // Iteration duration = sum of its requests' response times (ms).
    const duration = iteration.results.reduce(
      (acc, res) => acc + (res.response?.responseTime || 0),
      0
    );
    const hasFailures =
      summary.failedRequests > 0 ||
      summary.failedAssertions > 0 ||
      summary.failedTests > 0 ||
      (summary.errorRequests || 0) > 0;
    if (hasFailures) {
      aggregateTotals.allPassed = false;
    }
    const statusCell = hasFailures
      ? chalk.red.bold('✗ FAIL')
      : chalk.green.bold('✓ PASS');
    const requests = formatRequestsCellFromSummary(summary);
    const tests = `${summary.passedTests}/${summary.totalTests}`;
    const assertions = `${summary.passedAssertions}/${summary.totalAssertions}`;
    rows.push([
      `#${idx + 1}`,
      statusCell,
      requests,
      tests,
      assertions,
      duration,
    ]);
    // accumulate
    aggregateTotals.requests.total += summary.totalRequests;
    aggregateTotals.requests.passed += summary.passedRequests;
    const { failedRequests = 0, errorRequests = 0 } = summary;
    // Errored requests are folded into the failed count for the TOTAL row.
    aggregateTotals.requests.failed += failedRequests + errorRequests;
    aggregateTotals.requests.skipped += summary.skippedRequests || 0;
    aggregateTotals.tests.total += summary.totalTests;
    aggregateTotals.tests.passed += summary.passedTests;
    aggregateTotals.tests.failed += summary.failedTests;
    aggregateTotals.assertions.total += summary.totalAssertions;
    aggregateTotals.assertions.passed += summary.passedAssertions;
    aggregateTotals.assertions.failed += summary.failedAssertions;
    aggregateTotals.duration += duration;
  });
  // `errorRequests: 0` here because errors were already merged into `failed`
  // during accumulation above.
  const totalRequestsFormatted = formatRequestsCellFromSummary({
    totalRequests: aggregateTotals.requests.total,
    passedRequests: aggregateTotals.requests.passed,
    failedRequests: aggregateTotals.requests.failed,
    errorRequests: 0,
    skippedRequests: aggregateTotals.requests.skipped,
  });
  // Parallel iterations overlap in time, so the TOTAL duration is the slowest
  // iteration's duration rather than the sum of all iterations.
  const totalRowDuration = isParallelRun
    ? Math.max(
        ...results.map((iter) =>
          iter.results.reduce(
            (acc, res) => acc + (res.response?.responseTime || 0),
            0
          )
        )
      )
    : aggregateTotals.duration;
  const overallStatusCell = aggregateTotals.allPassed
    ? chalk.green.bold('✓ PASS')
    : chalk.red.bold('✗ FAIL');
  rows.push([
    chalk.bold('TOTAL'),
    chalk.bold(overallStatusCell),
    chalk.bold(totalRequestsFormatted),
    chalk.bold(`${aggregateTotals.tests.passed}/${aggregateTotals.tests.total}`),
    chalk.bold(`${aggregateTotals.assertions.passed}/${aggregateTotals.assertions.total}`),
    chalk.bold(totalRowDuration),
  ]);
  printGenericTable(headers, rows, '📊 Iteration Summary Table');
}
/**
 * Dispatches run results to the appropriate summary-table printer.
 *
 * Modes:
 * - Single run: key metrics and overall status.
 * - Iteration mode: one row per iteration plus a TOTAL row.
 *
 * Notes:
 * - Success Rate uses `calculateSuccessRate`, which gives equal weight to each non-skipped request,
 *   based on the fraction of passed checks (tests + assertions).
 * - In parallel iteration mode, the TOTAL duration is the longest iteration duration (not the sum).
 */
function printUnifiedSummaryTable(
  results,
  isIterationMode = false,
  isParallelRun = false
) {
  if (isIterationMode) {
    printIterationTable(results, isParallelRun);
    return;
  }
  printSingleRunTable(results);
}
// Maps the CLI `--sandbox` choice onto the underlying JS runtime:
// the "safe" sandbox runs scripts under quickjs, anything else under vm2.
const getJsSandboxRuntime = (sandbox) => (sandbox === 'safe' ? 'quickjs' : 'vm2');
/**
 * yargs builder for `bru run`: registers all CLI options and usage examples.
 *
 * Fixes in this revision:
 * - `noproxy` was registered as '--noproxy', so its type/default landed under
 *   argv['--noproxy'] instead of argv.noproxy (the key the handler reads).
 * - A duplicate registration of the 'verbose' option was removed.
 * - Help-text typos corrected ("sent" -> "send", "miliseconds" -> "milliseconds").
 *
 * @param {object} yargs - yargs instance to configure
 */
const builder = async (yargs) => {
  yargs
    .option('r', {
      describe: 'Indicates a recursive run',
      type: 'boolean',
      default: false
    })
    .option('cacert', {
      type: 'string',
      description: 'CA certificate to verify peer against'
    })
    .option('ignore-truststore', {
      type: 'boolean',
      default: false,
      description:
        'The specified custom CA certificate (--cacert) will be used exclusively and the default truststore is ignored, if this option is specified. Evaluated in combination with "--cacert" only.'
    })
    .option('disable-cookies', {
      type: 'boolean',
      default: false,
      description: 'Automatically save and send cookies with requests'
    })
    .option('env', {
      describe: 'Environment variables',
      type: 'string'
    })
    .option('env-file', {
      describe: 'Path to environment file (.bru or .json) - absolute or relative',
      type: 'string'
    })
    .option('env-var', {
      describe: 'Overwrite a single environment variable, multiple usages possible',
      type: 'string'
    })
    .option('sandbox', {
      describe: 'Javascript sandbox to use; available sandboxes are "developer" (default) or "safe"',
      default: 'developer',
      type: 'string'
    })
    .option('output', {
      alias: 'o',
      describe: 'Path to write file results to',
      type: 'string'
    })
    .option('format', {
      alias: 'f',
      describe: 'Format of the file results; available formats are "json" (default), "junit" or "html"',
      default: 'json',
      type: 'string'
    })
    .option('reporter-json', {
      describe: 'Path to write json file results to',
      type: 'string'
    })
    .option('reporter-junit', {
      describe: 'Path to write junit file results to',
      type: 'string'
    })
    .option('reporter-html', {
      describe: 'Path to write html file results to',
      type: 'string'
    })
    .option('insecure', {
      type: 'boolean',
      description: 'Allow insecure server connections'
    })
    .option('tests-only', {
      type: 'boolean',
      description: 'Only run requests that have a test or active assertion'
    })
    .option('bail', {
      type: 'boolean',
      description: 'Stop execution after a failure of a request, test, or assertion'
    })
    .option('parallel', {
      type: 'boolean',
      description: 'Execute CSV iterations in parallel instead of sequentially'
    })
    .option('verbose', {
      type: 'boolean',
      description: 'Allow verbose output for debugging purposes'
    })
    .option('csv-file-path', {
      describe: 'Path to the CSV file',
      type: 'string'
    })
    .option('json-file-path', {
      describe: 'Path to the JSON data file',
      type: 'string'
    })
    .option('iteration-count', {
      describe: 'Number of iterations',
      type: 'string'
    })
    .option('reporter-skip-all-headers', {
      type: 'boolean',
      description: 'Omit headers from the reporter output',
      default: false
    })
    .option('reporter-skip-headers', {
      type: 'array',
      description: 'Skip specific headers from the reporter output',
      default: []
    })
    .option('client-cert-config', {
      type: 'string',
      description: 'Path to the Client certificate config file used for securing the connection in the request'
    })
    .option('noproxy', {
      type: 'boolean',
      description: 'Disable all proxy settings (both collection-defined and system proxies)',
      default: false
    })
    .option('delay', {
      type: 'number',
      description: 'Delay between each request (in milliseconds)'
    })
    .option('tags', {
      type: 'string',
      description: 'Tags to include in the run'
    })
    .option('exclude-tags', {
      type: 'string',
      description: 'Tags to exclude from the run'
    })
    .example('$0 run request.bru', 'Run a request')
    .example('$0 run request.bru --env local', 'Run a request with the environment set to local')
    .example('$0 run request.bru --env-file env.bru', 'Run a request with the environment from env.bru file')
    .example('$0 run folder', 'Run all requests in a folder')
    .example('$0 run folder -r', 'Run all requests in a folder recursively')
    .example('$0 run request.bru folder', 'Run a request and all requests in a folder')
    .example('$0 run --reporter-skip-all-headers', 'Run all requests in a folder recursively with omitted headers from the reporter output')
    .example(
      '$0 run --reporter-skip-headers "Authorization"',
      'Run all requests in a folder recursively with skipped headers from the reporter output'
    )
    .example(
      '$0 run request.bru --env local --env-var secret=xxx',
      'Run a request with the environment set to local and overwrite the variable secret with value xxx'
    )
    .example(
      '$0 run request.bru --output results.json',
      'Run a request and write the results to results.json in the current directory'
    )
    .example(
      '$0 run request.bru --output results.xml --format junit',
      'Run a request and write the results to results.xml in junit format in the current directory'
    )
    .example(
      '$0 run request.bru --output results.html --format html',
      'Run a request and write the results to results.html in html format in the current directory'
    )
    .example(
      '$0 run request.bru --reporter-junit results.xml --reporter-html results.html',
      'Run a request and write the results to results.html in html format and results.xml in junit format in the current directory'
    )
    .example('$0 run request.bru --tests-only', 'Run all requests that have a test')
    .example(
      '$0 run request.bru --cacert myCustomCA.pem',
      'Use a custom CA certificate in combination with the default truststore when validating the peer of this request.'
    )
    .example(
      '$0 run folder --cacert myCustomCA.pem --ignore-truststore',
      'Use a custom CA certificate exclusively when validating the peers of the requests in the specified folder.'
    )
    .example('$0 run --client-cert-config client-cert-config.json', 'Run a request with Client certificate configurations')
    .example('$0 run folder --delay delayInMs', 'Run a folder with given milliseconds delay between each request.')
    .example('$0 run --noproxy', 'Run requests with system proxy disabled')
    .example(
      '$0 run folder --tags=hello,world --exclude-tags=skip',
      'Run only requests with tags "hello" or "world" and exclude any request with tag "skip".'
    )
    .example(
      '$0 run folder --csv-file-path data.csv --parallel',
      'Run all requests in a folder with CSV data in parallel instead of sequentially.'
    );
};
/**
 * yargs handler for `bru run`: resolves the collection from the current
 * working directory, loads environments/secrets/data files, executes the
 * requested items (optionally over multiple CSV/JSON-driven iterations,
 * sequentially or in parallel), prints a summary table and writes reports.
 *
 * Exits the process with a non-zero EXIT_STATUS code on any failure.
 */
const handler = async function (argv) {
  try {
    let {
      paths,
      cacert,
      ignoreTruststore,
      disableCookies,
      env,
      envFile,
      envVar,
      insecure,
      r: recursive,
      output: outputPath,
      format,
      reporterJson,
      reporterJunit,
      reporterHtml,
      sandbox,
      testsOnly,
      bail,
      verbose,
      csvFilePath,
      jsonFilePath,
      iterationCount = 1,
      reporterSkipAllHeaders,
      reporterSkipHeaders,
      clientCertConfig,
      noproxy,
      delay,
      tags: includeTags,
      excludeTags,
      parallel
    } = argv;

    // The collection is always resolved from the current working directory.
    const collectionPath = process.cwd();
    let collection = createCollectionJsonFromPathname(collectionPath);
    const { root: collectionRoot, brunoConfig } = collection;

    // Merge client certificates from an external config file into brunoConfig.
    if (clientCertConfig) {
      try {
        const clientCertConfigExists = await exists(clientCertConfig);
        if (!clientCertConfigExists) {
          console.error(chalk.red(`Client Certificate Config file "${clientCertConfig}" does not exist.`));
          process.exit(constants.EXIT_STATUS.ERROR_FILE_NOT_FOUND);
        }
        const clientCertConfigFileContent = fs.readFileSync(clientCertConfig, 'utf8');
        let clientCertConfigJson;
        try {
          clientCertConfigJson = JSON.parse(clientCertConfigFileContent);
        } catch (err) {
          console.error(chalk.red(`Failed to parse Client Certificate Config JSON: ${err.message}`));
          process.exit(constants.EXIT_STATUS.ERROR_INVALID_FILE);
        }
        if (clientCertConfigJson?.enabled && Array.isArray(clientCertConfigJson?.certs)) {
          if (brunoConfig.clientCertificates) {
            brunoConfig.clientCertificates.certs.push(...clientCertConfigJson.certs);
          } else {
            brunoConfig.clientCertificates = { certs: clientCertConfigJson.certs };
          }
          console.log(chalk.green(`Client certificates have been added`));
        } else {
          console.warn(chalk.yellow(`Client certificate configuration is enabled, but it either contains no valid "certs" array or the added configuration has been set to false`));
        }
      } catch (err) {
        console.error(chalk.red(`Unexpected error: ${err.message}`));
        process.exit(constants.EXIT_STATUS.ERROR_UNKNOWN);
      }
    }

    let runtimeVariables = {};
    let envVars = {};
    let dotEnvSecrets = [];
    let externalSecretVariables = {};
    // Track secret variable names from the selected environment and any overridden secret values
    let envSecretNamesSet = new Set();
    let overriddenSecretValues = [];

    if (env && envFile) {
      console.error(chalk.red(`Cannot use both --env and --env-file options together`));
      process.exit(constants.EXIT_STATUS.ERROR_MALFORMED_ENV_OVERRIDE);
    }

    // Load the selected environment, either from an explicit file (--env-file)
    // or from the collection's environments folder (--env <name>).
    if (envFile || env) {
      const envFilePath = envFile
        ? path.resolve(collectionPath, envFile)
        : path.join(collectionPath, 'environments', `${env}.bru`);
      const envFileExists = await exists(envFilePath);
      if (!envFileExists) {
        const errorPath = envFile || `environments/${env}.bru`;
        console.error(chalk.red(`Environment file not found: `) + chalk.dim(errorPath));
        process.exit(constants.EXIT_STATUS.ERROR_ENV_NOT_FOUND);
      }
      const ext = path.extname(envFilePath).toLowerCase();
      if (ext === '.json') {
        // Parse Bruno schema JSON environment
        let envJsonContent;
        try {
          envJsonContent = fs.readFileSync(envFilePath, 'utf8');
          const parsed = JSON.parse(envJsonContent);
          const normalizedEnv = parseEnvironmentJson(parsed);
          envVars = getEnvVars(normalizedEnv);
          const rawName = normalizedEnv?.name;
          const trimmedName = typeof rawName === 'string' ? rawName.trim() : '';
          envVars.__name__ = trimmedName || path.basename(envFilePath, '.json');
        } catch (err) {
          console.error(chalk.red(`Failed to parse Environment JSON: ${err.message}`));
          process.exit(constants.EXIT_STATUS.ERROR_INVALID_FILE);
        }
      } else {
        // Default to .bru parsing (normalize CRLF so the parser only sees LF)
        const envBruContent = fs.readFileSync(envFilePath, 'utf8').replace(/\r\n/g, '\n');
        const envJson = parseEnvironment(envBruContent);
        envVars = getEnvVars(envJson);
        envVars.__name__ = envFile ? path.basename(envFilePath, '.bru') : env;
        // collect secret var names for masking decisions
        envSecretNamesSet = new Set((envJson?.variables || [])
          .filter((v) => v && v.secret === true && v.name)
          .map((v) => v.name));
      }
    }

    // Apply --env-var overrides (each entry must be "name=value").
    if (envVar) {
      let processVars;
      if (typeof envVar === 'string') {
        processVars = [envVar];
      } else if (typeof envVar === 'object' && Array.isArray(envVar)) {
        processVars = envVar;
      } else {
        console.error(chalk.red(`overridable environment variables not parsable: use name=value`));
        process.exit(constants.EXIT_STATUS.ERROR_MALFORMED_ENV_OVERRIDE);
      }
      if (processVars && Array.isArray(processVars)) {
        for (const value of processVars.values()) {
          // split the string at the first equals sign
          const match = value.match(/^([^=]+)=(.*)$/);
          if (!match) {
            console.error(
              chalk.red(`Overridable environment variable not correct: use name=value - presented: `) +
              chalk.dim(`${value}`)
            );
            process.exit(constants.EXIT_STATUS.ERROR_INCORRECT_ENV_OVERRIDE);
          }
          // Track secret values for report masking: if the overridden variable name exists in the environment's vars:secret block, add its value to the masking list
          // match[1] = variable name (e.g., "clientid" from --env-var clientid=12345)
          // match[2] = variable value (e.g., "12345" from --env-var clientid=12345)
          if (envSecretNamesSet && envSecretNamesSet.has(match[1]) && match[2] !== undefined) {
            const v = String(match[2]);
            if (v.length) overriddenSecretValues.push(v);
          }
          envVars[match[1]] = match[2];
        }
      }
    }

    // Fold CLI flags into the runner options.
    const options = getOptions();
    if (bail) {
      options['bail'] = true;
    }
    if (insecure) {
      options['insecure'] = true;
    }
    if (disableCookies) {
      options['disableCookies'] = true;
    }
    if (noproxy) {
      options['noproxy'] = true;
    }
    if (verbose) {
      options['verbose'] = true;
    }
    if (cacert && cacert.length) {
      if (insecure) {
        console.error(chalk.red(`Ignoring the cacert option since insecure connections are enabled`));
      } else {
        const pathExists = await exists(cacert);
        if (pathExists) {
          options['cacert'] = cacert;
        } else {
          console.error(chalk.red(`Cacert File ${cacert} does not exist`));
        }
      }
    }
    options['ignoreTruststore'] = ignoreTruststore;

    includeTags = includeTags ? includeTags.split(',') : [];
    excludeTags = excludeTags ? excludeTags.split(',') : [];

    if (['json', 'junit', 'html'].indexOf(format) === -1) {
      console.error(chalk.red(`Format must be one of "json", "junit" or "html"`));
      process.exit(constants.EXIT_STATUS.ERROR_INCORRECT_OUTPUT_FORMAT);
    }

    let formats = {};
    // Maintains back compat with --format and --output
    if (outputPath && outputPath.length) {
      formats[format] = outputPath;
    }
    if (reporterHtml && reporterHtml.length) {
      formats['html'] = reporterHtml;
    }
    if (reporterJson && reporterJson.length) {
      formats['json'] = reporterJson;
    }
    if (reporterJunit && reporterJunit.length) {
      formats['junit'] = reporterJunit;
    }

    // load .env file at root of collection if it exists
    const dotEnvPath = path.join(collectionPath, '.env');
    const dotEnvExists = await exists(dotEnvPath);
    const processEnvVars = {
      ...process.env
    };
    if (dotEnvExists) {
      const content = fs.readFileSync(dotEnvPath, 'utf8');
      const jsonData = parseDotEnv(content);
      // Treat .env values as secrets for value-based masking in CLI
      dotEnvSecrets = Object.values(jsonData)
        .filter((v) => v !== undefined && v !== null && String(v).length > 0)
        .map((v) => String(v));
      forOwn(jsonData, (value, key) => {
        processEnvVars[key] = value;
      });
    }

    // Fetch external secrets (secrets.json) for the selected environment, if configured.
    if (env) {
      const externalSecretsJsonPath = path.join(collectionPath, 'secrets.json');
      const externalSecretsExists = await exists(externalSecretsJsonPath);
      if (externalSecretsExists) {
        const externalSecretsContent = fs.readFileSync(externalSecretsJsonPath, 'utf8');
        const interpolationOptions = {
          envVars,
          runtimeVariables,
          processEnvVars
        };
        const externalSecretsInterpolatedContent = interpolateString(externalSecretsContent, interpolationOptions);
        const externalSecretsJson = JSON.parse(externalSecretsInterpolatedContent);
        const {
          cli: externalSecretsProviderConfig,
          data: externalSecretsPathsData,
          type: externalSecretsType
        } = externalSecretsJson;
        const externalSecretsForCurrentEnvironment = externalSecretsPathsData?.find(
          (d) => d?.environment === env
        )?.secrets;
        const externalSecretsPathsForCurrentEnvironment = externalSecretsForCurrentEnvironment?.map((s) => s.path);
        if (externalSecretsProviderConfig && externalSecretsPathsForCurrentEnvironment) {
          verbose && console.log(chalk.yellow('Fetching external secrets... \n'));
          try {
            const secrets = await getExternalSecretsData({
              type: externalSecretsType,
              config: externalSecretsProviderConfig,
              paths: externalSecretsPathsForCurrentEnvironment,
              debug: verbose
            });
            secrets.forEach((s, idx) => {
              if (s?.error) {
                verbose &&
                  console.error(
                    chalk.red(`${idx + 1}. Couldn't fetch secret for path: ${s?.path}: `) +
                    chalk.dim(`${JSON.stringify(s?.error)}`)
                  );
                return;
              }
              verbose && console.log(chalk.yellow(`${idx + 1}. Fetched secret for path: ${s?.path} \n`));
              let secretName = externalSecretsForCurrentEnvironment?.find((x) => x.path === s.path)?.name;
              // Scalar secrets map directly; object secrets are flattened one level.
              if (typeof s?.data === 'string' || typeof s?.data === 'number') {
                externalSecretVariables[`$secrets.${secretName}`] = s?.data;
              } else {
                Object.entries(s?.data).forEach(([key, value]) => {
                  externalSecretVariables[`$secrets.${secretName}.${key}`] = value;
                });
              }
            });
          } catch (err) {
            verbose && console.error(chalk.red(`Fetching external secrets failed: `) + chalk.dim(`${err.message}`));
          }
        }
      }
    }

    let requestItems = [];
    let results = [];
    if (!paths || !paths.length) {
      // No explicit paths: run the whole collection recursively.
      paths = ['./'];
      recursive = true;
    }
    const resolvedPaths = paths.map(p => path.resolve(process.cwd(), p));
    for (const resolvedPath of resolvedPaths) {
      const pathExists = await exists(resolvedPath);
      if (!pathExists) {
        console.error(chalk.red(`Path not found: ${resolvedPath}`));
        process.exit(constants.EXIT_STATUS.ERROR_FILE_NOT_FOUND);
      }
    }
    requestItems = getCallStack(resolvedPaths, collection, { recursive });

    if (testsOnly) {
      // Keep only requests that carry an executable test or an active assertion.
      requestItems = requestItems.filter((item) => {
        const requestHasTests = hasExecutableTestInScript(item.request?.tests);
        const requestHasActiveAsserts = item.request?.assertions.some((x) => x.enabled) || false;
        const preRequestScript = item.request?.script?.req;
        const requestHasPreRequestTests = hasExecutableTestInScript(preRequestScript);
        const postResponseScript = item.request?.script?.res;
        const requestHasPostResponseTests = hasExecutableTestInScript(postResponseScript);
        return requestHasTests || requestHasActiveAsserts || requestHasPreRequestTests || requestHasPostResponseTests;
      });
    }

    requestItems = requestItems.filter((item) => {
      return isRequestTagsIncluded(item.tags, includeTags, excludeTags);
    });

    // Data-driven runs: CSV/JSON data files drive the iteration count (one row per iteration).
    let csvFileData = [];
    if (csvFilePath) {
      const csvPathExists = await exists(csvFilePath);
      if (!csvPathExists) {
        console.error(chalk.red(`CSV file ${csvFilePath} does not exist`));
        process.exit(constants.EXIT_STATUS.ERROR_CSV_FILE_NOT_FOUND);
      }
      const csvData = fs.readFileSync(csvFilePath, 'utf8');
      csvFileData = await parseCSV(csvData);
      iterationCount = csvFileData?.length;
    }

    let jsonFileData = [];
    if (jsonFilePath) {
      const jsonPathExists = await exists(jsonFilePath);
      if (!jsonPathExists) {
        // BUGFIX: this message previously said "CSV file".
        console.error(chalk.red(`JSON file ${jsonFilePath} does not exist`));
        process.exit(constants.EXIT_STATUS.ERROR_JSON_FILE_NOT_FOUND);
      }
      const jsonData = fs.readFileSync(jsonFilePath, 'utf8');
      jsonFileData = JSON.parse(jsonData);
      iterationCount = jsonFileData?.length;
    }

    let iterationRunResults = [];

    // Runs one iteration over the resolved request items and returns
    // { iterationIndex, summary, results } for that iteration.
    const runIteration = async (iterationIndex) => {
      let currentRequestIndex = 0;
      const runtime = getJsSandboxRuntime(sandbox);
      const csvDataVariables = csvFileData?.[iterationIndex] || {};
      const jsonDataVariables = jsonFileData?.[iterationIndex] || {};
      const hasCsvData = Object.keys(csvDataVariables).length > 0;
      const hasJsonData = Object.keys(jsonDataVariables).length > 0;
      const hasExternalData = hasCsvData || hasJsonData;
      const resultsForCurrentIteration = [];

      // Show an "Iteration: <n>" header only when it adds value and won't spam output.
      // - We print it for sequential multi-iteration runs, or when external data (CSV/JSON) is used.
      // - We suppress it for true parallel multi-iteration runs to keep logs compact.
      const isMultiIteration = iterationCount > 1;
      const isParallelMulti = parallel && isMultiIteration;
      const shouldShowIterationHeader = !isParallelMulti && (hasExternalData || isMultiIteration);
      if (shouldShowIterationHeader) {
        console.log(`\n${chalk.green('Iteration:', iterationIndex + 1)}\n`);
      }
      if (verbose && hasCsvData) {
        console.log(`${chalk.green('CSV data:')}\n${chalk.yellow(JSON.stringify(csvDataVariables, null, 2))}\n`);
      }
      if (verbose && hasJsonData) {
        console.log(`${chalk.green('JSON data:')}\n${chalk.yellow(JSON.stringify(jsonDataVariables, null, 2))}\n`);
      }
      let iterationData = { ...csvDataVariables, ...jsonDataVariables };
      const iterationRuntimeVariables = {
        ...runtimeVariables,
        // the current iteration's data (csv/json row data) can be accessed via `bru.getVar`
        // keeping this behaviour intact for backward compatibilty
        // adding `iterationData` to the runtimeVariables
        ...iterationData
      };
      const iterationCollection = cloneDeep(collection);
      iterationCollection.runnerIterationDetails = {
        iterationIndex,
        iterationData,
        totalIterations: iterationCount
      };

      // Lets scripts trigger other requests by (relative) pathname via `bru.runRequest`.
      // BUGFIX: previously wrapped in `new Promise` and called reject() even after
      // resolve(); now a plain async function. It still rejects with a plain string
      // (not an Error) to preserve the existing contract for script authors.
      const runSingleRequestByPathname = async (relativeItemPathname) => {
        let itemPathname = path.join(collectionPath, relativeItemPathname);
        if (itemPathname && !itemPathname.endsWith('.bru')) {
          itemPathname = `${itemPathname}.bru`;
        }
        const requestItem = cloneDeep(findItemInCollection(iterationCollection, itemPathname));
        if (requestItem) {
          const res = await runSingleRequest(
            requestItem,
            collectionPath,
            iterationRuntimeVariables,
            envVars,
            processEnvVars,
            brunoConfig,
            collectionRoot,
            externalSecretVariables,
            runtime,
            iterationCollection,
            runSingleRequestByPathname
          );
          return res?.response;
        }
        throw `bru.runRequest: invalid request path - ${itemPathname}`;
      };

      let nJumps = 0; // count the number of jumps to avoid infinite loops
      while (currentRequestIndex < requestItems.length) {
        const requestItem = cloneDeep(requestItems[currentRequestIndex]);
        const { name, pathname } = requestItem;
        const start = process.hrtime();
        const result = await runSingleRequest(
          requestItem,
          collectionPath,
          iterationRuntimeVariables,
          envVars,
          processEnvVars,
          brunoConfig,
          collectionRoot,
          externalSecretVariables,
          runtime,
          iterationCollection,
          runSingleRequestByPathname
        );
        const isLastRun = currentRequestIndex === requestItems.length - 1;
        // `delay` is NaN when --delay is passed a non-numeric value.
        const isValidDelay = !Number.isNaN(delay) && delay > 0;
        if (isValidDelay && !isLastRun) {
          console.log(chalk.yellow(`Waiting for ${delay}ms or ${(delay / 1000).toFixed(3)}s before next request.`));
          await new Promise((resolve) => setTimeout(resolve, delay));
        }
        if (Number.isNaN(delay) && !isLastRun) {
          console.log(chalk.red(`Ignoring delay because it's not a valid number.`));
        }
        // BUGFIX: compute the duration from a single hrtime() call (previously the
        // seconds and nanoseconds components came from two separate calls).
        const [durationSec, durationNano] = process.hrtime(start);
        const runDuration = durationSec + durationNano / 1e9;
        const requestResult = {
          ...result,
          runDuration,
          name,
          path: path.relative(collectionPath, pathname)?.replace?.('.bru', ''),
          iterationIndex
        };
        results.push({ ...requestResult });
        resultsForCurrentIteration.push({ ...requestResult });
        // Reporter scrubbers re-walk the shared `results` array on every request so
        // entries pushed by other (parallel) iterations are scrubbed as well.
        if (reporterSkipAllHeaders) {
          results.forEach((result) => {
            result.request.headers = {};
            result.response.headers = {};
          });
        }
        const deleteHeaderIfExists = (headers, header) => {
          Object.keys(headers).forEach((key) => {
            if (key.toLowerCase() === header.toLowerCase()) {
              delete headers[key];
            }
          });
        };
        if (reporterSkipHeaders?.length) {
          results.forEach((result) => {
            if (result.request?.headers) {
              reporterSkipHeaders.forEach((header) => {
                deleteHeaderIfExists(result.request.headers, header);
              });
            }
            if (result.response?.headers) {
              reporterSkipHeaders.forEach((header) => {
                deleteHeaderIfExists(result.response.headers, header);
              });
            }
          });
        }
        // bail if option is set and there is a failure
        if (bail) {
          const requestFailure = result?.error && !result?.skipped;
          const testFailure = result?.testResults?.find((iter) => iter.status === 'fail');
          const assertionFailure = result?.assertionResults?.find((iter) => iter.status === 'fail');
          const preRequestTestFailure = result?.preRequestTestResults?.find((iter) => iter.status === 'fail');
          const postResponseTestFailure = result?.postResponseTestResults?.find((iter) => iter.status === 'fail');
          if (requestFailure || testFailure || assertionFailure || preRequestTestFailure || postResponseTestFailure) {
            break;
          }
        }
        if (result?.shouldStopRunnerExecution) {
          break;
        }
        // determine next request (scripts can redirect flow via bru.setNextRequest)
        const nextRequestName = result?.nextRequestName;
        if (nextRequestName !== undefined) {
          nJumps++;
          if (nJumps > 10000) {
            console.error(chalk.red(`Too many jumps, possible infinite loop`));
            process.exit(constants.EXIT_STATUS.ERROR_INFINTE_LOOP);
          }
          // A null next-request name is the explicit "stop the run" signal.
          if (nextRequestName === null) {
            break;
          }
          const nextRequestIdx = requestItems.findIndex((iter) => iter.name === nextRequestName);
          if (nextRequestIdx >= 0) {
            currentRequestIndex = nextRequestIdx;
          } else {
            console.error("Could not find request with name '" + nextRequestName + "'");
            currentRequestIndex++;
          }
        } else {
          currentRequestIndex++;
        }
      }
      const iterationRunSummary = getRunnerSummary(resultsForCurrentIteration);
      const iterationResult = {
        iterationIndex,
        summary: iterationRunSummary,
        results: resultsForCurrentIteration
      };
      // BUGFIX: guard against a missing response (errored/skipped requests)
      // which previously crashed this reduce.
      const totalTime = resultsForCurrentIteration.reduce((acc, res) => acc + (res.response?.responseTime || 0), 0);
      if (iterationCount > 1 && !(parallel && iterationCount > 1)) {
        console.log(chalk.dim(chalk.grey(`Ran all requests - ${totalTime} ms`)));
      }
      return iterationResult;
    };

    if (parallel && iterationCount > 1) {
      console.log(
        chalk.blue(
          `Executing ${iterationCount} iterations in parallel${
            bail ? ' (bail: per-iteration stop on first failure)' : ''
          }...\n`
        )
      );
      const iterationPromises = [];
      for (
        let iterationIndex = 0;
        iterationIndex < iterationCount;
        iterationIndex++
      ) {
        iterationPromises.push(runIteration(iterationIndex));
      }
      const iterationResults = await Promise.all(iterationPromises);
      // Promise.all preserves order, but sort defensively by iteration index.
      iterationRunResults = iterationResults.sort(
        (a, b) => a.iterationIndex - b.iterationIndex
      );
    } else {
      for (
        let iterationIndex = 0;
        iterationIndex < iterationCount;
        iterationIndex++
      ) {
        const iterationResult = await runIteration(iterationIndex);
        iterationRunResults.push(iterationResult);
      }
    }

    const summary = getRunnerSummary(results);
    if (iterationCount === 1) {
      printUnifiedSummaryTable(iterationRunResults, false, false);
    } else {
      printUnifiedSummaryTable(iterationRunResults, true, Boolean(parallel && iterationCount > 1));
    }

    // Capture the run completion time
    const runCompletionTime = new Date().toISOString();
    // Extract environment name from envVars if available
    const environmentName = envVars?.__name__ || null;

    const formatKeys = Object.keys(formats);
    if (formatKeys && formatKeys.length > 0) {
      const outputJson = iterationRunResults;
      /* Note: envVars is not included here because CLI cannot access encrypted secrets
         stored by the Electron app. envVars only contains variable names for secrets,
         not the actual secret values which are encrypted and stored separately. */
      const secretValues = Array.from(new Set([
        ...dotEnvSecrets,
        ...overriddenSecretValues,
        ...Object.values(externalSecretVariables || {})
          .filter((v) => v != null && ['string', 'number', 'boolean'].includes(typeof v))
          .map(v => String(v))
      ]));
      // Each reporter receives the destination file path. The parameter is named
      // `destPath` to avoid shadowing the `path` module.
      const reporters = {
        'json': (destPath) => fs.writeFileSync(destPath, JSON.stringify(sanitizeRunnerResults(outputJson, { secretValues }), null, 2)),
        'junit': (destPath) => makeJUnitOutput(results, destPath, { secretValues }),
        html: (destPath) => makeHtmlOutput(outputJson, destPath, runCompletionTime, environmentName, { secretValues })
      };
      for (const formatter of Object.keys(formats)) {
        const reportPath = formats[formatter];
        const reporter = reporters[formatter];
        // Skip formatters lacking an output path.
        if (!reportPath || reportPath.length === 0) {
          continue;
        }
        const outputDir = path.dirname(reportPath);
        const outputDirExists = await exists(outputDir);
        if (!outputDirExists) {
          console.error(chalk.red(`Output directory ${outputDir} does not exist`));
          process.exit(constants.EXIT_STATUS.ERROR_MISSING_OUTPUT_DIR);
        }
        if (!reporter) {
          console.error(chalk.red(`Reporter ${formatter} does not exist`));
          process.exit(constants.EXIT_STATUS.ERROR_INCORRECT_OUTPUT_FORMAT);
        }
        reporter(reportPath);
        console.log(chalk.dim(chalk.grey(`Wrote ${formatter} results to ${reportPath}`)));
      }
    }

    // Exit non-zero when anything failed or errored across the whole run.
    if ((summary.failedAssertions + summary.failedTests + summary.failedPreRequestTests + summary.failedPostResponseTests + summary.failedRequests > 0) || (summary?.errorRequests > 0)) {
      process.exit(constants.EXIT_STATUS.ERROR_FAILED_COLLECTION);
    }
  } catch (err) {
    console.log('Something went wrong');
    console.error(chalk.red(err.message));
    process.exit(constants.EXIT_STATUS.ERROR_GENERIC);
  }
};
// yargs command-module exports: `command`/`desc` describe the CLI surface,
// `builder` registers options and examples, `handler` executes the run.
module.exports = {
  command,
  desc,
  builder,
  handler
};