@labnex/cli
Version:
CLI for Labnex, an AI-Powered Testing Automation Platform
415 lines • 24.3 kB
JavaScript
"use strict";
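// Illustrative invocations (assumes the published bin name is `labnex`, which
// this file does not itself confirm):
//   labnex ai generate --description "User login flow" --project PROJ1 --save
//   labnex ai optimize --project PROJ1 --changes src/auth.ts,src/login.ts
//   labnex ai analyze <runId> <failureId>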
var __importDefault = (this && this.__importDefault) || function (mod) {
return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.aiCommand = void 0;
const commander_1 = require("commander");
const inquirer_1 = __importDefault(require("inquirer"));
const chalk_1 = __importDefault(require("chalk"));
const ora_1 = __importDefault(require("ora"));
const client_1 = require("../api/client");
const run_1 = require("./run");
exports.aiCommand = new commander_1.Command('ai')
.description('Access AI-powered features like test generation and optimization.')
.addCommand(new commander_1.Command('generate')
.description('Generate a new test case using AI based on a description.')
.option('-d, --description <description>', 'Test description')
.option('-p, --project <code>', 'Project code to add test case to')
.option('-f, --files <paths>', 'Comma-separated list of source files this test covers')
.option('--save', 'Save to project (requires --project)')
.action(async (options) => {
try {
let { description, project } = options;
let files = [];
if (options.files) {
files = options.files.split(',').map((p) => p.trim()).filter(Boolean);
}
// Prompt for description if not provided
if (!description) {
const descPrompt = await inquirer_1.default.prompt([
{
type: 'input',
name: 'description',
message: 'Describe what you want to test:',
validate: (input) => input.length > 0 || 'Description is required'
}
]);
description = descPrompt.description;
}
if (files.length === 0) {
const fileAns = await inquirer_1.default.prompt({ type: 'input', name: 'paths', message: 'File(s) this test covers (comma-separated paths):' });
files = fileAns.paths.split(',').map((p) => p.trim()).filter(Boolean);
}
// Banner
console.log(chalk_1.default.bold.cyan('\n🧠 AI Test Generator'));
console.log(chalk_1.default.gray('─'.repeat(55)));
// Step spinners to mimic the demo experience
const s1 = (0, ora_1.default)('Analyzing requirements...').start();
// slight artificial delay to improve UX
const wait = (ms) => new Promise(res => setTimeout(res, ms));
await wait(600);
s1.succeed(chalk_1.default.green('✓ Analyzing requirements...'));
const s2 = (0, ora_1.default)('Generating test steps...').start();
await wait(600);
s2.succeed(chalk_1.default.green('✓ Generating test steps...'));
const s3 = (0, ora_1.default)('Creating assertions...').start();
await wait(600);
s3.succeed(chalk_1.default.green('✓ Creating assertions...'));
const s4 = (0, ora_1.default)('Adding edge cases...').start();
await wait(600);
s4.succeed(chalk_1.default.green('✓ Adding edge cases...'));
const spinner = (0, ora_1.default)('Finalising test case with AI...').start();
try {
const response = await client_1.apiClient.generateTestCase(description);
if (response.success) {
spinner.succeed(chalk_1.default.green('Test case generated successfully!'));
console.log(chalk_1.default.bold.cyan('\n📋 Generated Test Case:'));
console.log(`${chalk_1.default.bold.white('Title:')} ${chalk_1.default.yellow(response.data.title)}`);
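// NOTE: Priority and Category below are hardcoded display placeholders; the
// save paths further down persist priority 'MEDIUM' regardless.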
console.log(`${chalk_1.default.bold.white('Priority:')} HIGH`);
console.log(`${chalk_1.default.bold.white('Category:')} Authentication`);
console.log(`${chalk_1.default.bold.white('Test Steps:')}`);
response.data.steps.forEach((step, index) => {
console.log(`${index + 1}. ${step}`);
});
// Placeholder validation tests & expected results for demo formatting
console.log(chalk_1.default.bold.white('Validation Tests:'));
console.log('• Empty email field → "Email is required"');
console.log('• Invalid email format → "Enter valid email"');
console.log('• Empty password → "Password is required"');
console.log('• Invalid credentials → "Invalid login"');
console.log(chalk_1.default.bold.white('Expected Results:'));
console.log('✓ User successfully logged in');
console.log('✓ Redirected to dashboard');
console.log('✓ All validation errors handled');
// Save directly when both --save and --project were provided
if (options.save && project) {
const saveSpinner = (0, ora_1.default)('Saving test case to specified project...').start();
try {
const projects = await client_1.apiClient.getProjects();
const projectData = projects.data.find(p => p.projectCode === project);
if (!projectData) {
saveSpinner.fail(chalk_1.default.red('Project not found: ' + project));
return;
}
const saveResponse = await client_1.apiClient.createTestCase(projectData._id, {
title: response.data.title,
description: response.data.description,
steps: response.data.steps,
expectedResult: response.data.expectedResult,
priority: 'MEDIUM',
relatedFiles: files
});
if (saveResponse.success) {
saveSpinner.succeed(chalk_1.default.green(`✓ Test case saved to project ${projectData.projectCode}`));
const savedId = saveResponse.data?._id || saveResponse.data?.id;
if (savedId) {
console.log(chalk_1.default.cyan(`→ Test ID: ${savedId}`));
}
}
else {
saveSpinner.fail(chalk_1.default.red('Failed to save test case: ' + (saveResponse.error || 'Unknown error')));
}
}
catch (error) {
saveSpinner.fail(chalk_1.default.red('Save failed: ' + (error.response?.data?.message || error.message || 'Unknown error')));
}
}
else {
// Ask if user wants to save to project
const savePrompt = await inquirer_1.default.prompt([
{
type: 'confirm',
name: 'save',
message: 'Save this test case to a project?',
default: false
}
]);
if (savePrompt.save) {
const projectsSpinner = (0, ora_1.default)('Fetching projects...').start();
let projects;
try {
projects = await client_1.apiClient.getProjects();
projectsSpinner.succeed();
}
catch (e) {
projectsSpinner.fail('Could not fetch projects');
return;
}
if (projects.success && projects.data.length > 0) {
const projectPrompt = await inquirer_1.default.prompt([
{
type: 'list',
name: 'project',
message: 'Select a project to save the test case to:',
choices: projects.data.map(p => ({
name: `${p.name} (${p.projectCode})`,
value: p._id
}))
}
]);
const saveSpinner = (0, ora_1.default)('Saving test case...').start();
try {
const saveResponse = await client_1.apiClient.createTestCase(projectPrompt.project, {
title: response.data.title,
description: response.data.description,
steps: response.data.steps,
expectedResult: response.data.expectedResult,
priority: 'MEDIUM',
relatedFiles: files
});
if (saveResponse.success) {
const savedProject = projects.data.find(p => p._id === projectPrompt.project);
saveSpinner.succeed(chalk_1.default.green(`✓ Test case saved to project ${savedProject?.projectCode}`));
const savedId = saveResponse.data?._id || saveResponse.data?.id;
if (savedId) {
console.log(chalk_1.default.cyan(`→ Test ID: ${savedId}`));
}
}
else {
saveSpinner.fail(chalk_1.default.red('Failed to save test case: ' + (saveResponse.error || 'Unknown error')));
}
}
catch (error) {
saveSpinner.fail(chalk_1.default.red('Save failed: ' + (error.response?.data?.message || error.message || 'Unknown error')));
}
}
}
}
}
else {
spinner.fail(chalk_1.default.red('Failed to generate test case: ' + response.error));
}
}
catch (error) {
spinner.fail(chalk_1.default.red('Generation failed: ' + (error.response?.data?.message || error.message || 'Unknown error')));
}
}
catch (error) {
console.error(chalk_1.default.red('An unexpected error occurred:'), error.message);
}
}))
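// The `optimize` subcommand resolves a project code to its id, asks the API to
// select the tests most relevant to the given changed files, and falls back to
// client-side text matching when the server returns an empty selection.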
.addCommand(new commander_1.Command('optimize')
.description('Optimize a project\'s test suite using AI to select relevant tests.')
.option('-p, --project <code>', 'Project code')
.option('--changes <files>', 'Comma-separated list of changed files')
.action(async (options) => {
try {
let projectCode = options.project;
if (process.env.LABNEX_VERBOSE === 'true') {
client_1.apiClient.setVerbose(true);
}
// Get project
if (!projectCode) {
const projects = await client_1.apiClient.getProjects();
if (projects.success && projects.data.length > 0) {
const projectPrompt = await inquirer_1.default.prompt([
{
type: 'list',
name: 'project',
message: 'Select project:',
choices: projects.data.map(p => ({
name: `${p.name} (${p.projectCode})`,
value: p.projectCode
}))
}
]);
projectCode = projectPrompt.project;
}
}
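// e.g. --changes "src/a.ts, src/b.ts" yields ['src/a.ts', 'src/b.ts']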
const codeChanges = options.changes ? options.changes.split(',').map((f) => f.trim()) : undefined;
const spinner = (0, ora_1.default)('Analyzing test suite with AI...').start();
try {
const projects = await client_1.apiClient.getProjects();
const project = projects.data.find(p => p.projectCode === projectCode);
if (!project) {
spinner.fail(chalk_1.default.red(`Project not found: ${projectCode}`));
return;
}
const response = await client_1.apiClient.optimizeTestSuite(project._id, codeChanges);
if (response.success) {
// ---------- CLIENT-SIDE FALLBACK (before any printing) ----------
if (response.data.selectedTests.length === 0) {
const allCases = await client_1.apiClient.getTestCases(project._id);
if (allCases.success) {
const norm = (s) => s.toLowerCase();
const matches = allCases.data.filter(tc => codeChanges?.some((ch) => norm(tc.title).includes(norm(ch)) ||
norm((tc.description || '')).includes(norm(ch)) ||
(tc.steps || []).some((st) => norm(st).includes(norm(ch))))).map(tc => tc._id);
if (matches.length) {
response.data.selectedTests = matches;
response.data.reasoning += ' | Client text-match fallback selected tests';
}
}
}
// ---------- END FALLBACK ----------
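// Worked example: with --changes login.ts, any case whose title, description,
// or steps contain the substring "login.ts" (case-insensitive) is selected.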
spinner.succeed(chalk_1.default.green('Test suite optimization completed!'));
console.log(chalk_1.default.bold.cyan('\n╔══════════════════════════════════╗'));
console.log(chalk_1.default.bold.cyan('║    🤖 AI Optimization Results    ║'));
console.log(chalk_1.default.bold.cyan('╚══════════════════════════════════╝'));
console.log(`\n${chalk_1.default.bold.white('Reasoning:')} ${chalk_1.default.italic(response.data.reasoning)}`);
console.log(`${chalk_1.default.bold.white('Selected Test Count:')} ${chalk_1.default.yellow(response.data.selectedTests.length)}`);
if (response.data.selectedTests.length > 0) {
const testCasesSpinner = (0, ora_1.default)('Fetching details for recommended tests...').start();
try {
const testCasesResponse = await client_1.apiClient.getTestCases(project._id);
testCasesSpinner.succeed();
console.log(`\n${chalk_1.default.bold.white('Recommended Tests to Run:')}`);
console.log(chalk_1.default.gray('───────────────────────────────────'));
response.data.selectedTests.forEach((testId, index) => {
const testCase = testCasesResponse.data.find(tc => tc._id === testId);
const testTitle = testCase ? testCase.title : 'Unknown Test';
console.log(`${chalk_1.default.gray('│')} ${chalk_1.default.magenta(index + 1)}. ${testTitle} ${chalk_1.default.gray(`(${testId})`)}`);
});
console.log(chalk_1.default.gray('───────────────────────────────────'));
}
catch (error) {
testCasesSpinner.fail('Could not fetch test case details.');
// Fallback to showing IDs
console.log(`\n${chalk_1.default.bold.white('Recommended Tests to Run (by ID):')}`);
console.log(chalk_1.default.gray('───────────────────────────────────'));
response.data.selectedTests.forEach((testId, index) => {
console.log(`${chalk_1.default.gray('│')} ${chalk_1.default.magenta(index + 1)}. ${testId}`);
});
console.log(chalk_1.default.gray('───────────────────────────────────'));
}
const runPrompt = await inquirer_1.default.prompt([
{
type: 'confirm',
name: 'run',
message: 'Do you want to run this optimized test suite now?',
default: false
}
]);
if (runPrompt.run) {
console.log(chalk_1.default.cyan('\n🚀 Running optimized tests...'));
await (0, run_1.runTests)({
projectId: project._id,
testIds: response.data.selectedTests,
mode: 'local', // Or detect from user config
headless: true, // Default to headless for automated runs
baseUrl: process.env.FRONTEND_URL || ''
});
}
}
else {
// ---------- CLIENT-SIDE FALLBACK ----------
const allCases = await client_1.apiClient.getTestCases(project._id);
if (allCases.success) {
const norm = (s) => s.toLowerCase();
const matches = allCases.data.filter(tc => codeChanges?.some((ch) => norm(tc.title).includes(norm(ch)) ||
norm(tc.description || '').includes(norm(ch)) ||
(tc.steps || []).some((st) => norm(st).includes(norm(ch))))).map(tc => tc._id);
if (matches.length) {
response.data.selectedTests = matches;
response.data.reasoning +=
' | Client text-match fallback selected tests';
}
}
// ---------- END FALLBACK ----------
if (response.data.selectedTests.length === 0) {
const fallbackAns = await inquirer_1.default.prompt({ type: 'list', name: 'choice', message: 'No tests matched - choose fallback', choices: [{ name: 'Run ALL tests', value: 'all' }, { name: 'Run only HIGH priority', value: 'high' }, { name: 'Cancel', value: 'none' }] });
if (fallbackAns.choice === 'all') {
const all = await client_1.apiClient.getTestCases(project._id);
if (all.success) {
response.data.selectedTests = all.data.map((t) => t._id);
}
}
else if (fallbackAns.choice === 'high') {
const all = await client_1.apiClient.getTestCases(project._id);
if (all.success) {
response.data.selectedTests = all.data.filter((t) => t.priority === 'HIGH').map((t) => t._id);
}
}
if (response.data.selectedTests.length === 0 && fallbackAns.choice === 'none') {
spinner.fail(chalk_1.default.yellow('User cancelled run.'));
return;
}
}
}
}
else {
spinner.fail(chalk_1.default.red('Optimization failed: ' + response.error));
}
}
catch (error) {
spinner.fail(chalk_1.default.red('Optimization failed: ' + (error.response?.data?.message || error.message || 'Unknown error')));
}
}
catch (error) {
console.error(chalk_1.default.red('An unexpected error occurred:'), error.message);
}
}))
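// The `analyze` subcommand fetches the stored run results to summarise the
// failure, then asks the AI endpoint for an explanation and suggested fixes.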
.addCommand(new commander_1.Command('analyze')
.description('Analyze a specific failed test in a run using AI insights.')
.argument('<runId>', 'The test run ID')
.argument('<failureId>', 'The failed test (case) ID to analyze')
.action(async (runId, failureId) => {
try {
// Header
console.log(chalk_1.default.bold.cyan('\n🔍 AI Failure Analysis'));
console.log(chalk_1.default.gray('─'.repeat(55)));
const spinner = (0, ora_1.default)('Fetching failure details...').start();
// 1. Fetch run results to locate the failed test case details
const runResultsRes = await client_1.apiClient.getTestRunResults(runId);
if (!runResultsRes.success) {
spinner.fail(chalk_1.default.red(`Failed to fetch run results: ${runResultsRes.error}`));
return;
}
const runResults = runResultsRes.data;
// Expected shape: { testCases: [ { _id, title, status, duration, steps, error, ... } ], config?: { environment } }
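// Illustrative value (shape only; field values here are made up):
// { testCases: [{ _id: '...', title: 'Login flow', status: 'failed',
//                 duration: 4200, steps: [...], error: 'Timeout' }],
//   config: { environment: 'staging' } }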
const failedTest = runResults.testCases?.find((tc) => tc._id === failureId);
if (!failedTest) {
spinner.fail(chalk_1.default.red(`Test case not found in run: ${failureId}`));
return;
}
spinner.succeed(chalk_1.default.green('Failure details fetched'));
// Display summary block similar to UX
console.log(`${chalk_1.default.bold('Test:')} "${failedTest.title || failureId}"`);
console.log(`${chalk_1.default.bold('Status:')} ${chalk_1.default.red('✗ FAILED')}`);
if (failedTest.duration) {
console.log(`${chalk_1.default.bold('Duration:')} ${(failedTest.duration / 1000).toFixed(1)}s`);
}
const env = runResults.config?.environment || 'unknown';
console.log(`${chalk_1.default.bold('Environment:')} ${env}`);
if (failedTest.steps && failedTest.steps.length > 0) {
const failedStep = failedTest.steps.find((s) => s.status?.toLowerCase?.() === 'failed');
if (failedStep) {
console.log(chalk_1.default.bold('\n🔍 Failure Details:'));
console.log(`Step ${failedStep.stepNumber || failedStep.index || '?'}: "${failedStep.description || failedStep.title || 'Unknown step'}"`);
if (failedStep.error) {
console.log(`Error: ${failedStep.error}`);
}
}
}
else if (failedTest.error) {
console.log(chalk_1.default.bold('\n🔍 Failure Details:'));
console.log(`Error: ${failedTest.error}`);
}
// 2. Call AI analysis endpoint
const analysisSpinner = (0, ora_1.default)('Running AI analysis...').start();
const analysisRes = await client_1.apiClient.analyzeFailure(runId, failureId);
if (!analysisRes.success) {
analysisSpinner.fail(chalk_1.default.red(`AI analysis failed: ${analysisRes.error}`));
return;
}
analysisSpinner.succeed(chalk_1.default.green('AI analysis complete'));
const { analysis, suggestions, confidence } = analysisRes.data;
console.log(chalk_1.default.bold.cyan('\n🧠 AI Analysis:'));
console.log(chalk_1.default.white(analysis || 'No detailed analysis provided'));
if (suggestions?.length) {
console.log(chalk_1.default.bold.cyan('\n💡 Suggested Solutions:'));
suggestions.forEach((s, i) => console.log(`${i + 1}. ${s}`));
}
if (confidence !== undefined) {
console.log(chalk_1.default.cyan(`\n✨ Confidence Score: ${confidence}%`));
}
}
catch (err) {
console.error(chalk_1.default.red('An unexpected error occurred:'), err.message);
if (process.env.LABNEX_VERBOSE === 'true') {
console.error(err.stack);
}
}
}));
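// Minimal sketch of mounting this command in a host CLI (assumed standard
// commander wiring; not part of this file):
//   const { Command } = require('commander');
//   const program = new Command('labnex');
//   program.addCommand(exports.aiCommand);
//   program.parseAsync(process.argv);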
//# sourceMappingURL=ai.js.map