// @labnex/cli — CLI for Labnex, an AI-Powered Testing Automation Platform.
// Compiled CommonJS output (tsc emit) for the `run` command.
;
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
if (k2 === undefined) k2 = k;
var desc = Object.getOwnPropertyDescriptor(m, k);
if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
desc = { enumerable: true, get: function() { return m[k]; } };
}
Object.defineProperty(o, k2, desc);
}) : (function(o, m, k, k2) {
if (k2 === undefined) k2 = k;
o[k2] = m[k];
}));
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
Object.defineProperty(o, "default", { enumerable: true, value: v });
}) : function(o, v) {
o["default"] = v;
});
var __importStar = (this && this.__importStar) || (function () {
var ownKeys = function(o) {
ownKeys = Object.getOwnPropertyNames || function (o) {
var ar = [];
for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
return ar;
};
return ownKeys(o);
};
return function (mod) {
if (mod && mod.__esModule) return mod;
var result = {};
if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
__setModuleDefault(result, mod);
return result;
};
})();
var __importDefault = (this && this.__importDefault) || function (mod) {
return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.runCommand = void 0;
exports.runTests = runTests;
const commander_1 = require("commander");
const chalk_1 = __importDefault(require("chalk"));
const ora_1 = __importDefault(require("ora"));
const inquirer_1 = __importDefault(require("inquirer"));
const client_1 = require("../api/client");
const projectConfig_1 = require("../utils/projectConfig");
// LocalBrowserExecutor will be imported lazily so the CLI can build without the executor package built yet.
let LocalBrowserExecutor;
exports.runCommand = new commander_1.Command('run')
.description('Execute tests for a specified project using local or cloud resources.')
.option('-p, --project <codeOrId>', 'Project code or ID to run tests for')
.option('--project-id <id>', 'DEPRECATED: use --project instead')
.option('-t, --test-id <id>', 'Run a specific test case by ID')
.option('--test-ids <ids>', 'Comma-separated list of test case IDs to run')
.option('-e, --environment <env>', 'Environment to run tests against', 'staging')
.option('-m, --mode <mode>', 'Execution mode: local or cloud', 'local')
.option('--cloud', 'Shortcut for --mode cloud')
.option('-b, --base-url <url>', 'Base URL of the application under test')
.option('--optimize-ai', 'Enable AI optimization for element finding (deprecated ā use --ai-optimize)')
.option('--ai-optimize', 'Enable AI optimization for element finding')
.option('--username <username>', 'Username to use when tests contain login placeholders')
.option('--password <password>', 'Password to use when tests contain login placeholders')
.option('--parallel <number>', 'Number of parallel workers (cloud mode)', '4')
.option('--headless', 'Run in headless mode (local mode)', false)
.option('--timeout <ms>', 'Test timeout in milliseconds', '300000')
.option('--watch', 'Stream live updates until run completes')
.action(async (options) => {
try {
let projectId;
let projectCode;
if (options.project) {
if (/^[a-f0-9]{24}$/i.test(options.project)) {
projectId = options.project;
}
else {
projectCode = options.project.toUpperCase();
}
}
else if (options.projectId) {
projectId = options.projectId;
}
// Map --cloud flag to mode
if (options.cloud) {
options.mode = 'cloud';
}
// If we received a project *code*, look up its ID first.
if (!projectId && projectCode) {
const lookupSpinner = (0, ora_1.default)(`Resolving project code ${projectCode}...`).start();
try {
const response = await client_1.apiClient.getProjects();
if (response.success) {
const match = response.data.find((p) => p.projectCode === projectCode);
if (match) {
projectId = match._id;
lookupSpinner.succeed(chalk_1.default.green(`Project resolved: ${match.projectCode} (${match.name})`));
}
else {
lookupSpinner.fail(chalk_1.default.red(`Project code not found: ${projectCode}`));
}
}
else {
lookupSpinner.fail(chalk_1.default.red(`Failed to fetch projects: ${response.error || 'unknown error'}`));
}
}
catch (err) {
lookupSpinner.fail(chalk_1.default.red(`Error fetching projects: ${err.message}`));
}
}
if (!projectId) {
const spinner = (0, ora_1.default)('Fetching your projects...').start();
try {
const response = await client_1.apiClient.getProjects();
if (response.success && response.data && response.data.length > 0) {
spinner.stop();
const choices = response.data.map((p) => ({
name: `${p.name} (${p.projectCode})`,
value: p._id,
}));
const answer = await inquirer_1.default.prompt([
{
type: 'list',
name: 'selectedProject',
message: 'Which project would you like to run tests for?',
choices: choices,
},
]);
projectId = answer.selectedProject;
}
else if (response.success && response.data.length === 0) {
spinner.fail(chalk_1.default.yellow('You do not have any projects.'));
console.log(chalk_1.default.cyan('You can create one using: labnex projects create'));
return;
}
else {
spinner.fail(chalk_1.default.red(`Failed to fetch projects: ${response.message || 'Unknown error'}`));
return;
}
}
catch (error) {
spinner.fail(chalk_1.default.red(`Error fetching projects: ${error.message}`));
return;
}
}
const runOptions = { ...options, projectId };
if (options.testIds) {
runOptions.testIds = options.testIds.split(',');
}
// Normalize AI optimize flag across both variants
runOptions.optimizeAi = options.optimizeAi || options.aiOptimize;
await runTests(runOptions);
}
catch (error) {
console.error(chalk_1.default.red('ā Test execution failed:'), error.message);
if (process.env.LABNEX_VERBOSE === 'true') {
console.error(error.stack);
}
process.exit(1);
}
});
async function runTests(options) {
const projectId = options.projectId;
const testId = options.testId;
const testIds = options.testIds;
const environment = options.environment;
const mode = options.mode;
const aiOptimize = options.optimizeAi || options.aiOptimize;
const verbose = process.env.LABNEX_VERBOSE === 'true';
const pkg = require('../../package.json');
console.log(chalk_1.default.bold(`š Labnex CLI v${pkg.version}`));
console.log(chalk_1.default.gray('ā'.repeat(40)));
// Project info will be printed after fetching project details.
if (aiOptimize) {
console.log(chalk_1.default.cyan('ā AI optimization enabled'));
}
console.log(chalk_1.default.cyan(`ā Environment: ${environment}`));
if (options.parallel) {
console.log(chalk_1.default.cyan(`ā Parallel execution: ${options.parallel} workers`));
}
if (aiOptimize) {
console.log(chalk_1.default.gray(`š¤ AI Optimization: enabled`));
}
if (verbose) {
console.log(chalk_1.default.gray(`š Detailed logging: enabled`));
}
console.log('');
const projectsSpinner = (0, ora_1.default)('Fetching project details...').start();
let project = null;
try {
const projectsResponse = await client_1.apiClient.getProjects();
if (verbose) {
console.log('\n[DEBUG] apiClient.getProjects() response:', JSON.stringify(projectsResponse, null, 2));
}
if (projectsResponse.success && projectsResponse.data) {
project = projectsResponse.data.find((p) => p._id === projectId);
if (project) {
if (verbose) {
console.log('[DEBUG] Found project:', JSON.stringify(project, null, 2));
}
projectsSpinner.succeed(chalk_1.default.green(`ā
Project found: ${project.projectCode} (${project.name})`));
console.log(chalk_1.default.cyan(`ā Project: ${project.projectCode} (${project.name})`));
}
else {
projectsSpinner.fail(chalk_1.default.red(`ā Project not found: ${projectId}`));
return;
}
}
else {
projectsSpinner.fail(chalk_1.default.red(`ā Failed to fetch project details.`));
return;
}
}
catch (error) {
projectsSpinner.fail(chalk_1.default.red(`ā Error fetching project details: ${error.message}`));
return;
}
const testCasesSpinner = (0, ora_1.default)('Fetching test cases...').start();
let allTestCases = [];
try {
const testCasesResponse = await client_1.apiClient.getTestCases(projectId);
if (testCasesResponse.success && testCasesResponse.data) {
allTestCases = testCasesResponse.data;
testCasesSpinner.succeed(chalk_1.default.green(`ā
Found ${allTestCases.length} test cases.`));
}
else {
testCasesSpinner.fail(chalk_1.default.red(`ā Failed to fetch test cases.`));
return;
}
}
catch (error) {
testCasesSpinner.fail(chalk_1.default.red(`ā Error fetching test cases: ${error.message}`));
return;
}
let testCasesToRun = allTestCases;
if (testId) {
testCasesToRun = allTestCases.filter(tc => tc._id === testId);
console.log(chalk_1.default.blue(`Running a single test: ${testCasesToRun[0]?.title || testId}`));
}
else if (testIds && testIds.length > 0) {
testCasesToRun = allTestCases.filter(tc => testIds.includes(tc._id));
console.log(chalk_1.default.blue(`Running ${testCasesToRun.length} specific tests from AI optimization.`));
}
if (testCasesToRun.length === 0) {
console.log(chalk_1.default.yellow('No test cases to run.'));
return;
}
if (mode === 'local') {
await runTestsLocally(testCasesToRun, project, options);
}
else {
await runTestsInCloud(testCasesToRun, project, options);
}
}
async function runTestsLocally(testCases, project, options) {
const { headless, timeout, optimizeAi, baseUrl } = options;
console.log(chalk_1.default.cyan('\nš§ Starting local test execution...'));
console.log(chalk_1.default.gray(`Browser mode: ${headless ? 'Headless' : 'Headed'}`));
console.log(chalk_1.default.gray(`Timeout per test: ${timeout}ms`));
if (!LocalBrowserExecutor) {
const mod = await Promise.resolve().then(() => __importStar(require('@labnex/executor')));
LocalBrowserExecutor = mod.LocalBrowserExecutor;
}
let executor = new LocalBrowserExecutor({
headless,
aiOptimizationEnabled: optimizeAi
});
// --- AI Analysis banner (informational) ---
console.log(chalk_1.default.bold.cyan('\nš§ AI Analysis:'));
console.log(`⢠${chalk_1.default.yellow(testCases.length)} test cases found`);
if (optimizeAi) {
console.log('⢠Prioritizing critical path tests');
}
// rough estimate: 4s per test
const estSeconds = (testCases.length * 4).toFixed(0);
console.log(`⢠Estimated time: ${estSeconds} seconds`);
// helper for progress bar
const makeBar = (current, total, barWidth = 20) => {
const filled = Math.round((current / total) * barWidth);
return 'ā'.repeat(filled).padEnd(barWidth, 'ā');
};
try {
try {
await executor.initialize();
}
catch (initErr) {
if (!headless) {
console.log(chalk_1.default.yellow('\nā ļø Headed Chrome could not start ā retrying in headless mode.'));
executor.cleanup && await executor.cleanup();
const HeadlessExecutorMod = await Promise.resolve().then(() => __importStar(require('@labnex/executor')));
LocalBrowserExecutor = HeadlessExecutorMod.LocalBrowserExecutor;
executor = new LocalBrowserExecutor({ headless: true, aiOptimizationEnabled: optimizeAi });
await executor.initialize();
}
else {
throw initErr;
}
}
const results = [];
const total = testCases.length;
let index = 0;
for (const testCase of testCases) {
const result = await executor.executeTestCase(testCase._id, testCase.steps, testCase.expectedResult, testCase.baseUrl || baseUrl || '', testCase.title);
results.push(result);
// progress line (overwrite)
index++;
const bar = makeBar(index, total);
const statusIcon = result.status === 'passed' ? chalk_1.default.green('ā
') : chalk_1.default.red('ā');
const line = `${testCase.title.substring(0, 20)} ${bar} ${Math.round((index / total) * 100)}% (${index}/${total}) ${statusIcon} ${(result.duration / 1000).toFixed(1)}s`;
process.stdout.write(`\r${line}`);
}
process.stdout.write('\n');
await executor.cleanup();
console.log(chalk_1.default.bold.cyan('\nāāāāāāāāāāāāāāāāāāāāāāāāāāāā'));
console.log(chalk_1.default.bold.cyan('ā Local Test Run Results ā'));
console.log(chalk_1.default.bold.cyan('āāāāāāāāāāāāāāāāāāāāāāāāāāāā'));
results.forEach((result) => {
const testCase = testCases.find(tc => tc._id === result.testCaseId);
const title = testCase ? testCase.title : result.testCaseId;
const status = result.status === 'passed'
? chalk_1.default.green.bold('ā PASSED')
: chalk_1.default.red.bold('ā FAILED');
console.log(`\n${status} - ${title}`);
console.log(chalk_1.default.gray(` ID: ${result.testCaseId}`));
console.log(chalk_1.default.gray(` Duration: ${result.duration}ms`));
if (result.status === 'failed') {
const lastFailedStep = result.steps.find(s => s.status === 'failed');
if (lastFailedStep) {
console.log(chalk_1.default.red(` Error: ${lastFailedStep.message}`));
}
// console.log(chalk.red.dim(` Screenshot: ${result.screenshot || 'Not available'}`));
}
});
// summary block
const passed = results.filter(r => r.status === 'passed').length;
const failed = results.length - passed;
const durationTotal = results.reduce((acc, r) => acc + (r.duration || 0), 0);
console.log(chalk_1.default.bold.cyan('\nš Results Summary:'));
console.log(`⢠Passed: ${passed}/${results.length} tests ${failed === 0 ? chalk_1.default.green('ā
') : chalk_1.default.red('ā')}`);
console.log(`⢠Duration: ${(durationTotal / 1000).toFixed(1)} seconds`);
const successRate = ((passed / results.length) * 100).toFixed(0);
console.log(`⢠Success Rate: ${successRate}%`);
}
catch (error) {
console.error(chalk_1.default.red('\nā An error occurred during local test execution:'), error.message);
await executor.cleanup();
}
}
async function runTestsInCloud(testCases, project, options) {
let baseUrlOption = options.baseUrl;
// Attempt to read from project config if not provided
if (!baseUrlOption) {
const projCfg = (0, projectConfig_1.loadProjectConfig)();
if (projCfg?.baseUrl) {
baseUrlOption = projCfg.baseUrl;
}
}
if (!baseUrlOption) {
const answer = await inquirer_1.default.prompt([
{
type: 'input',
name: 'baseUrl',
message: 'Base URL of the application under test (e.g., https://example.com):',
validate: (input) => /^https?:\/\//i.test(input) || 'Please enter a valid http(s) URL',
when: () => options.mode === 'cloud'
}
]);
baseUrlOption = answer.baseUrl;
// Offer to remember
const { remember } = await inquirer_1.default.prompt([{ type: 'confirm', name: 'remember', message: 'Save this base URL to labnex.config.json?', default: true }]);
if (remember) {
(0, projectConfig_1.saveProjectConfig)({ baseUrl: baseUrlOption });
console.log(chalk_1.default.green('Base URL saved to labnex.config.json'));
}
}
// Detect if any placeholder credentials present in steps
const needsUsername = testCases.some((tc) => tc.steps.some((s) => s.includes('__PROMPT_VALID_USERNAME__')));
const needsPassword = testCases.some((tc) => tc.steps.some((s) => s.includes('__PROMPT_VALID_PASSWORD__')));
let usernameOpt = options.username;
let passwordOpt = options.password;
const interactive = process.env.RUNNER_NON_INTERACTIVE !== 'true' && process.stdout.isTTY;
if (interactive && needsUsername && !usernameOpt) {
const ans = await inquirer_1.default.prompt([{ type: 'input', name: 'username', message: 'Enter username for login placeholders:' }]);
usernameOpt = ans.username;
}
if (interactive && needsPassword && !passwordOpt) {
const ans = await inquirer_1.default.prompt([{ type: 'password', name: 'password', message: 'Enter password for login placeholders:', mask: '*' }]);
passwordOpt = ans.password;
}
const spinner = (0, ora_1.default)('Creating cloud test run...').start();
try {
const response = await client_1.apiClient.createTestRun(project._id, {
testCases: testCases.map(tc => tc._id),
parallel: parseInt(options.parallel, 10) || 2,
environment: options.environment,
aiOptimization: !!options.optimizeAi,
baseUrl: baseUrlOption,
useCloudRunner: true,
credentials: { username: usernameOpt, password: passwordOpt }
});
if (!response.success) {
spinner.fail(chalk_1.default.red(`Failed to create cloud run: ${response.error}`));
return;
}
spinner.succeed(chalk_1.default.green('ā
Cloud test run created.'));
const run = response.data;
console.log(chalk_1.default.bold.cyan('\nš” Cloud Run Details'));
console.log(chalk_1.default.gray('ā'.repeat(40)));
console.log(`${chalk_1.default.bold('Run ID:')} ${run._id}`);
console.log(`${chalk_1.default.bold('Status:')} ${run.status}`);
console.log(`${chalk_1.default.bold('Total Tests:')} ${run.results.total}`);
console.log(`${chalk_1.default.bold('Parallel:')} ${run.config.parallel}`);
console.log(`${chalk_1.default.bold('AI Optimization:')} ${run.config.aiOptimization ? 'Yes' : 'No'}`);
console.log('\nYou can monitor progress with:');
console.log(` ${chalk_1.default.cyan(`labnex status --run-id ${run._id}`)}`);
console.log(` ${chalk_1.default.cyan(`labnex ai analyze ${run._id} <failedTestId>`)} after it completes.`);
if (options.watch) {
await streamRunProgress(run._id);
}
}
catch (err) {
spinner.fail(chalk_1.default.red(`Error triggering cloud run: ${err.message}`));
}
}
async function streamRunProgress(runId) {
const bar = (current, total, width = 20) => {
const filled = Math.round((current / total) * width);
return 'ā'.repeat(filled).padEnd(width, 'ā');
};
console.log(chalk_1.default.cyan('\nš Watching run progress (press Ctrl+C to exit)'));
let completed = false;
while (!completed) {
try {
const res = await client_1.apiClient.getTestRun(runId);
if (res.success) {
const { passed, failed, total } = res.data.results;
const done = passed + failed;
const statusLine = `${bar(done, total)} ${done}/${total} | ${chalk_1.default.green(`${passed}ā`)} ${failed > 0 ? chalk_1.default.red(`${failed}ā`) : ''}`;
process.stdout.write(`\r${statusLine}`);
if (res.data.status === 'COMPLETED' || res.data.status === 'FAILED') {
completed = true;
process.stdout.write('\n');
}
}
}
catch { }
await new Promise(r => setTimeout(r, 3000));
}
}
//# sourceMappingURL=run.js.map