// @labnex/cli
// CLI for Labnex, an AI-Powered Testing Automation Platform
"use strict";
// tsc interop helper: wraps a CommonJS export in `{ default: ... }` so it can
// be consumed like an ES-module default import; real ES modules (flagged with
// __esModule) pass through untouched. Reuses a pre-existing helper if present.
var __importDefault = (this && this.__importDefault) || function (mod) {
    if (mod && mod.__esModule) {
        return mod;
    }
    return { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
const commander_1 = require("commander");
const chalk_1 = __importDefault(require("chalk"));
const figlet_1 = __importDefault(require("figlet"));
const auth_1 = require("./commands/auth");
const projects_1 = require("./commands/projects");
const ai_1 = require("./commands/ai");
const analyze_1 = require("./commands/analyze");
const config_1 = require("./commands/config");
const list_1 = require("./commands/list");
const completion_1 = require("./commands/completion");
const lint_1 = require("./commands/lint");
const testcase_1 = require("./commands/testcase");
const config_2 = require("./utils/config");
const client_1 = require("./api/client");
const ora_1 = __importDefault(require("ora"));
const run_1 = require("./commands/run");
const fs_1 = require("fs");
const os_1 = require("os");
const path_1 = require("path");
const welcomeWizard_1 = require("./welcomeWizard");
/**
 * CLI entry point.
 *
 * Boots the Labnex CLI: runs the first-run welcome wizard when no config file
 * exists yet, ensures the config is initialized, prints the banner, registers
 * all commander sub-commands plus a custom root help formatter, and finally
 * parses process.argv.
 *
 * Fixes vs. previous revision:
 *  - removed a leftover `--- DEBUG ---` console.log that printed after every
 *    invocation;
 *  - the `status` action now sets a non-zero exit code on failure instead of
 *    only printing the error.
 */
async function main() {
    // First run detection & configuration init
    const configPath = (0, path_1.join)((0, os_1.homedir)(), '.labnex', 'config.json');
    const firstRun = !(0, fs_1.existsSync)(configPath);
    if (firstRun) {
        try {
            await (0, welcomeWizard_1.runWelcomeWizard)();
        }
        catch (err) {
            console.error('Wizard failed:', err.message);
            process.exit(1);
        }
    }
    // Ensure config exists afterwards
    await (0, config_2.initConfig)();
    // Display banner
    console.log(chalk_1.default.cyan(figlet_1.default.textSync('Labnex CLI', {
        font: 'Standard',
        horizontalLayout: 'default',
        verticalLayout: 'default'
    })));
    console.log(chalk_1.default.gray('AI-Powered Testing Automation Platform\n'));
    commander_1.program
        .name('labnex')
        .description('The official CLI for the Labnex AI-Powered Testing Automation Platform.')
        .version('1.3.0')
        .option('-v, --verbose', 'enable verbose output')
        .option('--api-url <url>', 'override API URL')
        .hook('preAction', (thisCommand) => {
        // Propagate global flags via the environment so command modules can
        // read them without threading options through every call site.
        const options = thisCommand.opts();
        if (options.verbose) {
            process.env.LABNEX_VERBOSE = 'true';
        }
        if (options.apiUrl) {
            process.env.LABNEX_API_URL = options.apiUrl;
        }
    });
    // Main run command - unified and clean
    commander_1.program.addCommand(run_1.runCommand);
    // Status command
    commander_1.program
        .command('status')
        .description('Monitor test execution status or check a specific test run.')
        .option('-r, --run-id <id>', 'Check specific test run ID')
        .action(async (options) => {
        try {
            if (options.runId) {
                await checkSpecificTestRun(options.runId);
            }
            else {
                await checkOverallStatus();
            }
        }
        catch (error) {
            console.error(chalk_1.default.red('❌ Error checking status:'), error.message);
            // Signal failure to shells/CI; exitCode (not exit()) lets pending
            // output flush before the process terminates.
            process.exitCode = 1;
        }
    });
    // Register other command groups
    commander_1.program.addCommand(auth_1.authCommand);
    commander_1.program.addCommand(projects_1.projectsCommand);
    commander_1.program.addCommand(ai_1.aiCommand);
    commander_1.program.addCommand(analyze_1.analyzeCommand);
    commander_1.program.addCommand((0, config_1.setupConfigCommands)());
    commander_1.program.addCommand(list_1.listCommand);
    commander_1.program.addCommand((0, completion_1.completionCommand)(commander_1.program));
    // Lint command
    commander_1.program.addCommand(lint_1.lintCommand);
    commander_1.program.addCommand(testcase_1.createTestCaseCommand);
    // Keep a stock helper around: sub-command help falls back to it below.
    const defaultHelper = new commander_1.Help();
    commander_1.program.configureHelp({
        sortSubcommands: true,
        showGlobalOptions: true,
        helpWidth: 100,
        // Colorize the option/command terms
        subcommandTerm: (cmd) => chalk_1.default.cyan(cmd.name()),
        optionTerm: (option) => chalk_1.default.yellow(option.flags),
        // Friendly description fallbacks
        subcommandDescription: (cmd) => {
            const descriptions = {
                run: 'Execute tests for a project (local/cloud)',
                status: 'Monitor test execution status',
                list: 'List resources (projects, tests)',
                auth: 'Manage authentication and API token settings',
                projects: 'Manage projects (create, list, show details)',
                ai: 'Access AI-powered features (generate, optimize, analyze)',
                analyze: 'Analyze test results and identify failure causes',
                config: 'Configure Labnex CLI settings'
            };
            return descriptions[cmd.name()] || cmd.description();
        },
        // Custom global formatter only for the root program
        formatHelp: (cmd, helper) => {
            if (cmd === commander_1.program) {
                // Section divider: "─ TITLE ─────..." padded to a fixed width.
                const line = (title) => `${chalk_1.default.gray('─'.repeat(1))} ${chalk_1.default.bold(title)} ${chalk_1.default.gray('─'.repeat(60 - title.length))}`;
                let out = '\n' + chalk_1.default.bold.cyan('Labnex CLI — AI-Powered Testing Automation Platform') + '\n';
                out += chalk_1.default.gray('='.repeat(80)) + '\n\n';
                // Usage
                out += line('USAGE') + '\n ' + chalk_1.default.cyan(helper.commandUsage(cmd)) + '\n\n';
                // Commands grouped by category
                out += line('COMMANDS') + '\n';
                const commandCategories = {
                    'Test Execution': ['run', 'status'],
                    'Project Management': ['projects', 'list'],
                    Authentication: ['auth'],
                    'AI & Analysis': ['ai', 'analyze'],
                    Configuration: ['config']
                };
                for (const [category, names] of Object.entries(commandCategories)) {
                    out += `\n ${chalk_1.default.bold.cyan(category)}`;
                    names.forEach((name) => {
                        const sub = cmd.commands.find((c) => c.name() === name);
                        if (sub) {
                            const desc = helper.subcommandDescription(sub);
                            out += `\n ${chalk_1.default.cyan(name.padEnd(12))} ${desc}`;
                        }
                    });
                    out += '\n';
                }
                // Global options
                out += line('GLOBAL OPTIONS') + '\n';
                helper.visibleOptions(cmd).forEach((opt) => {
                    out += ` ${chalk_1.default.yellow(opt.flags.padEnd(25))} ${opt.description}\n`;
                });
                // Examples section (taken from previous help text)
                out += '\n' + chalk_1.default.bold('EXAMPLES:') + '\n';
                out += ` ${chalk_1.default.cyan('labnex run --project 6832ac498153de9c85b03727')}\n Run all tests for a project locally\n\n`;
                out += ` ${chalk_1.default.cyan('labnex run --project 6832ac498153de9c85b03727 --test-id 68362689160c68e7f548621d')}\n Run a specific test case\n\n`;
                out += ` ${chalk_1.default.cyan('labnex run --project MYAPP --mode cloud --parallel 8')}\n Run tests in cloud with 8 parallel workers\n\n`;
                out += ` ${chalk_1.default.cyan('labnex list projects')}\n List all available projects\n\n`;
                out += ` ${chalk_1.default.cyan('labnex list tests 6832ac498153de9c85b03727')}\n List test cases for a specific project\n\n`;
                out += ` ${chalk_1.default.cyan('labnex status')}\n Check overall test execution status\n\n`;
                out += ` ${chalk_1.default.cyan('labnex ai generate --description "Test login functionality"')}\n Generate a test case using AI\n\n`;
                out += ` ${chalk_1.default.cyan('labnex ai optimize --project LABX')}\n Optimize test suite for a project\n\n`;
                out += ` ${chalk_1.default.cyan('labnex analyze failure --run-id <run-id>')}\n Analyze a test failure with AI\n`;
                out += chalk_1.default.gray('='.repeat(80)) + '\n';
                out += chalk_1.default.cyan('Documentation: ') + 'https://labnexdev.github.io/Labnex\n';
                return out;
            }
            // Fallback to default helper for sub-command help
            return defaultHelper.formatHelp(cmd, helper);
        }
    });
    // Parse command line arguments
    await commander_1.program.parseAsync(process.argv);
}
// Main test execution function is now in commands/run.ts
/**
 * Placeholder system-status check: simulates an API health probe with a
 * one-second delay, then reports each subsystem as OK.
 * FIXME: replace the simulated delay with a real status endpoint call.
 */
async function checkOverallStatus() {
    const spinner = (0, ora_1.default)('Checking overall system status...').start();
    // Await the simulated probe so the promise resolves only after the report
    // is printed; previously a bare setTimeout let this function return
    // immediately, so awaiting callers continued before the spinner finished.
    await new Promise((resolve) => setTimeout(resolve, 1000));
    spinner.succeed(chalk_1.default.green('All systems operational.'));
    console.log(chalk_1.default.gray(' - API Server: OK'));
    console.log(chalk_1.default.gray(' - Test Runner Fleet: OK'));
    console.log(chalk_1.default.gray(' - Database Connection: OK'));
}
/**
 * Fetch a single test run by ID and print a human-readable summary.
 * Expects the API client to return a `{ success, data, error }` envelope;
 * network/API failures are reported on the spinner rather than thrown.
 */
async function checkSpecificTestRun(runId) {
    const spinner = (0, ora_1.default)(`Fetching status for test run ${chalk_1.default.cyan(runId)}...`).start();
    // Resolve a friendly "CODE (Name)" label for the run's project, falling
    // back to the raw project ID — or 'N/A' when no project info exists.
    const resolveProjectLabel = async (run) => {
        if (run.project?.projectCode) {
            return `${run.project.projectCode} (${run.project.name || 'Unnamed'})`;
        }
        if (!run.projectId) {
            return 'N/A';
        }
        // Attempt to look up code/name
        try {
            const projRes = await client_1.apiClient.getProjects();
            if (!projRes.success) {
                return run.projectId;
            }
            const found = projRes.data.find((p) => p._id === run.projectId);
            return found ? `${found.projectCode} (${found.name})` : run.projectId;
        }
        catch {
            return run.projectId;
        }
    };
    try {
        const response = await client_1.apiClient.getTestRun(runId);
        if (!response.success || !response.data) {
            spinner.fail(chalk_1.default.red(`Failed to fetch test run: ${response.error || 'Unknown error'}`));
            return;
        }
        const run = response.data;
        spinner.succeed(chalk_1.default.green('Status retrieved successfully.'));
        console.log(chalk_1.default.bold.cyan(`\nTest Run Details (ID: ${runId})`));
        console.log(chalk_1.default.gray('──────────────────────────────────'));
        const projDisplay = await resolveProjectLabel(run);
        console.log(`${chalk_1.default.bold('Project:')} ${projDisplay}`);
        console.log(`${chalk_1.default.bold('Status:')} ${run.status}`);
        console.log(`${chalk_1.default.bold('Total Tests:')} ${run.results.total}`);
        console.log(`${chalk_1.default.bold('Passed:')} ${chalk_1.default.green(run.results.passed)}`);
        console.log(`${chalk_1.default.bold('Failed:')} ${chalk_1.default.red(run.results.failed)}`);
        console.log(`${chalk_1.default.bold('Duration:')} ${run.results.duration}ms`);
        console.log(`${chalk_1.default.bold('Created At:')} ${new Date(run.createdAt).toLocaleString()}`);
    }
    catch (error) {
        spinner.fail(chalk_1.default.red(`Error: ${error.message}`));
    }
}
// Bootstrap: run the CLI; any error that escapes main() is reported and the
// process exits with a failure status.
main().catch((error) => {
    console.error(chalk_1.default.red('\nAn unexpected error occurred:'), error);
    process.exit(1);
});
//# sourceMappingURL=index.js.map