eslint-rule-benchmark
Benchmark ESLint rules with detailed performance metrics for CI and plugin development
import 'node:worker_threads'
import path from 'node:path'
import cac from 'cac'
import {
DEFAULT_REPORTER_FORMAT,
DEFAULT_ITERATIONS,
DEFAULT_WARMUP_ITERATIONS,
DEFAULT_TIMEOUT_MS,
} from '../constants/index.js'
import { runBenchmarksFromConfig } from '../runners/run-benchmarks-from-config.js'
import { validateConfig } from '../core/config/validate-config.js'
import { loadConfig } from '../core/config/load-config.js'
import { version } from '../package.json.js'
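// CLI entry point: registers the `run` and `run-single` commands with cac,
// validates the resolved configuration, and delegates to runBenchmarksFromConfig.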
function run() {
let cli = cac('eslint-rule-benchmark')
cli.version(version).help()
cli
.command('run', 'Run benchmarks from config')
.option('--config <path>', 'Path to configuration file')
.option('--eslint-config <config>', 'Path to ESLint config file')
.option('--report <format>', 'Report format (console, json, markdown)', {
default: DEFAULT_REPORTER_FORMAT,
})
.option('--output <file>', 'Output file for the report')
.action(async options => {
try {
let { filepath, config } = await loadConfig(options.config)
let configDirectory = path.dirname(filepath)
let errors = await validateConfig(config, configDirectory)
if (errors.length > 0) {
console.error('Configuration validation errors:')
for (let error of errors) {
console.error(`- ${error}`)
}
process.exit(1)
}
let reporterOptionsArray = [
{
format: options.report ?? DEFAULT_REPORTER_FORMAT,
outputPath: options.output,
},
]
await runBenchmarksFromConfig({
eslintConfigFile: options.eslintConfig,
reporterOptions: reporterOptionsArray,
userConfig: config,
configDirectory,
})
} catch (error) {
        let message = error instanceof Error ? error.message : String(error)
        console.error(`Error: ${message}`)
process.exit(1)
}
})
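  // Example invocation (assuming the published bin is named `eslint-rule-benchmark`;
  // the rule and source paths are purely illustrative):
  //   eslint-rule-benchmark run-single --rule ./rules/no-foo.js --name no-foo --source ./tests/fixtures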
cli
.command('run-single', 'Run benchmark on a single ESLint rule')
.option('--rule <rule>', 'Path to the ESLint rule file')
.option('--name <name>', 'Name of the rule to benchmark')
.option('--eslint-config <config>', 'Path to ESLint config file')
.option('--source <source>', 'Path to directory or file with test cases')
.option('--iterations <number>', 'Number of benchmark iterations', {
default: DEFAULT_ITERATIONS,
})
.option('--warmup <number>', 'Number of warmup iterations', {
default: DEFAULT_WARMUP_ITERATIONS,
})
.option(
'--max-duration <number>',
'Target time in ms for benchmarking (lower values = fewer iterations)',
{
default: DEFAULT_TIMEOUT_MS,
},
)
.option('--report <format>', 'Report format (console, json, markdown)', {
default: DEFAULT_REPORTER_FORMAT,
})
.option('--output <file>', 'Output file for the report')
.action(async options => {
try {
if (!options.rule) {
throw new Error('Rule path (--rule) is required')
}
if (!options.name) {
throw new Error('Rule name/ID (--name) is required')
}
if (!options.source) {
throw new Error('Source path (--source) is required')
}
let reporterOptionsArray = [
{
outputPath: options.output,
format: options.report,
},
]
let constructedUserConfig = {
tests: [
{
cases: [
{
testPath: options.source,
},
],
name: `CLI: ${options.name}`,
rulePath: options.rule,
ruleId: options.name,
},
],
          warmup: {
            iterations: options.warmup > 0 ? options.warmup : undefined,
            enabled: options.warmup > 0,
          },
          iterations: options.iterations > 0 ? options.iterations : undefined,
          timeout: options.maxDuration > 0 ? options.maxDuration : undefined,
}
let configDirectory = process.cwd()
let errors = await validateConfig(
constructedUserConfig,
configDirectory,
)
if (errors.length > 0) {
console.error('Constructed configuration validation errors:')
for (let error of errors) {
console.error(`- ${error}`)
}
process.exit(1)
}
console.info(`Running benchmark for rule ${options.name}...`)
console.info(`Using rule file: ${options.rule}`)
console.info(`Using source: ${options.source}`)
await runBenchmarksFromConfig({
eslintConfigFile: options.eslintConfig,
reporterOptions: reporterOptionsArray,
userConfig: constructedUserConfig,
configDirectory,
})
} catch (error) {
        let message = error instanceof Error ? error.message : String(error)
        console.error(`Error: ${message}`)
process.exit(1)
}
})
cli.parse()
}
export { run }
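For reference, a sketch of a configuration file that the `run` command could consume. The authoritative schema lives in loadConfig and validateConfig, which are not shown in this module; every field below is inferred from the object that `run-single` constructs above, and the file name, paths, and numeric values are illustrative assumptions rather than documented defaults.

// benchmark.config.js (hypothetical file name; loadConfig's lookup rules are not shown in this module)
export default {
  tests: [
    {
      name: 'no-foo benchmark',                  // display name (run-single uses `CLI: <name>`)
      rulePath: './rules/no-foo.js',             // illustrative path to the rule implementation
      ruleId: 'no-foo',                          // rule ID the benchmark registers the rule under
      cases: [{ testPath: './tests/fixtures' }], // file or directory with code to lint
    },
  ],
  iterations: 50,                                // placeholder value
  warmup: { enabled: true, iterations: 5 },      // placeholder values
  timeout: 500,                                  // ms; mirrors --max-duration in run-single
}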