@craftapit/tester

A focused, LLM-powered testing framework for natural language test scenarios
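
Below is the compiled CommonJS source of the package's `run-tests` CLI command. As orientation, here is a minimal sketch of the runner lifecycle that the command drives, using only calls that appear in the source that follows; the top-level import path, directory, and model name are illustrative assumptions, not part of this file:

// runner-sketch.js — hypothetical programmatic use of the test runner.
// The compiled command below imports createTestRunner from '../../core/TestRunner';
// whether the package root re-exports it is an assumption.
const { createTestRunner } = require('@craftapit/tester');

async function main() {
    // Mirrors the CLI action: create, initialize, run a directory of stories, summarize, clean up.
    const runner = createTestRunner({
        llmAdapter: 'ollama',                                       // same default the CLI uses
        llmAdapterConfig: { model: 'llama3', contextSize: 16384 },  // model name is illustrative
        caching: true,
        verbose: true,
        timeout: 60000,
    });
    await runner.initialize();
    try {
        const results = await runner.runTestDirectory('./stories', '**/*.md');
        const passed = Object.values(results).filter((r) => r.passed).length;
        console.log(`${passed}/${Object.keys(results).length} stories passed`);
    } finally {
        await runner.cleanup();
    }
}

main().catch((err) => {
    console.error(err);
    process.exit(1);
});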

"use strict"; var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) { if (k2 === undefined) k2 = k; var desc = Object.getOwnPropertyDescriptor(m, k); if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) { desc = { enumerable: true, get: function() { return m[k]; } }; } Object.defineProperty(o, k2, desc); }) : (function(o, m, k, k2) { if (k2 === undefined) k2 = k; o[k2] = m[k]; })); var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) { Object.defineProperty(o, "default", { enumerable: true, value: v }); }) : function(o, v) { o["default"] = v; }); var __importStar = (this && this.__importStar) || (function () { var ownKeys = function(o) { ownKeys = Object.getOwnPropertyNames || function (o) { var ar = []; for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k; return ar; }; return ownKeys(o); }; return function (mod) { if (mod && mod.__esModule) return mod; var result = {}; if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]); __setModuleDefault(result, mod); return result; }; })(); Object.defineProperty(exports, "__esModule", { value: true }); exports.addRunTestsCommand = addRunTestsCommand; const path = __importStar(require("path")); const fs = __importStar(require("fs")); const TestRunner_1 = require("../../core/TestRunner"); /** * Add the run-tests command to the CLI * @param program Commander program instance */ function addRunTestsCommand(program) { program .command('run-tests') .description('Run test stories from a directory') .argument('<directory>', 'Directory containing test stories') .option('-a, --adapter <adapter>', 'LLM adapter to use (anthropic, openai, ollama)', 'ollama') .option('-m, --model <model>', 'Model to use with the LLM adapter') .option('-k, --api-key <apiKey>', 'API key for the LLM adapter') .option('-u, --base-url <baseUrl>', 'Base URL for the LLM adapter') .option('-c, --context-size <size>', 'Context size for the LLM adapter', '16384') .option('-d, --dynamic', 'Enable dynamic context sizing', true) .option('--no-dynamic', 'Disable dynamic context sizing') .option('--caching', 'Enable caching', true) .option('--no-caching', 'Disable caching') .option('--cache-path <path>', 'Path to the cache file') .option('-v, --verbose', 'Enable verbose output') .option('-t, --timeout <timeout>', 'Timeout for test execution in milliseconds', '60000') .option('-p, --pattern <pattern>', 'File pattern to match (default: "**/*.md")', '**/*.md') .action(async (directoryArg, options) => { const directory = path.resolve(directoryArg); // Check if directory exists if (!fs.existsSync(directory)) { console.error(`Directory does not exist: ${directory}`); process.exit(1); } // Configure the adapter const adapterConfig = {}; if (options.model) adapterConfig.model = options.model; if (options.apiKey) adapterConfig.apiKey = options.apiKey; if (options.baseUrl) adapterConfig.baseUrl = options.baseUrl; if (options.contextSize) adapterConfig.contextSize = parseInt(options.contextSize, 10); if (options.dynamic !== undefined) adapterConfig.dynamicContextSizing = options.dynamic; // Create the test runner const runner = (0, TestRunner_1.createTestRunner)({ llmAdapter: options.adapter, llmAdapterConfig: adapterConfig, caching: options.caching, cachePath: options.cachePath, verbose: options.verbose, timeout: options.timeout ? 
parseInt(options.timeout, 10) : 60000 }); try { // Initialize the runner await runner.initialize(); // Run the tests console.log(`Running tests in ${directory} with pattern ${options.pattern || '**/*.md'}...`); const results = await runner.runTestDirectory(directory, options.pattern); // Calculate summary const totalTests = Object.keys(results).length; const passedTests = Object.values(results).filter(r => r.passed).length; const failedTests = totalTests - passedTests; console.log('\n=== Test Summary ==='); console.log(`Total tests: ${totalTests}`); console.log(`Passed tests: ${passedTests}`); console.log(`Failed tests: ${failedTests}`); // Set exit code based on test results process.exit(failedTests > 0 ? 1 : 0); } catch (error) { console.error('Test execution failed:', error); process.exit(1); } finally { // Clean up await runner.cleanup(); } }); }
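
To turn the exported `addRunTestsCommand` into an executable, it only needs to be registered on a Commander program and parsed. A minimal host script might look like the following; the file paths and wiring are assumptions inferred from the `program` API used above, not something this file ships:

#!/usr/bin/env node
// cli.js — hypothetical host script for the command defined above
const { Command } = require('commander');
const { addRunTestsCommand } = require('./commands/run-tests'); // assumed location of this module

const program = new Command();
addRunTestsCommand(program);
program.parse(process.argv);

// Example invocation, using only flags declared above (values are illustrative):
//   node cli.js run-tests ./stories --adapter ollama --model llama3 --verbose

Note that Commander treats the `--no-dynamic` and `--no-caching` declarations as negations, so the action reads single booleans (`options.dynamic`, `options.caching`) that default to true and flip to false when the negated flag is passed.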