hikma-engine

Code Knowledge Graph Indexer - A sophisticated TypeScript-based indexer that transforms Git repositories into multi-dimensional knowledge stores for AI agents

"use strict"; var __importDefault = (this && this.__importDefault) || function (mod) { return (mod && mod.__esModule) ? mod : { "default": mod }; }; Object.defineProperty(exports, "__esModule", { value: true }); exports.RagCommand = void 0; const chalk_1 = __importDefault(require("chalk")); const base_1 = require("./base"); const enhanced_search_service_1 = require("../../modules/enhanced-search-service"); const llm_rag_1 = require("../../modules/llm-rag"); const python_dependency_checker_1 = require("../../utils/python-dependency-checker"); const ui_1 = require("../ui"); class RagCommand extends base_1.BaseCommand { register() { return this.program .command('rag <query> [project-path]') .description('Generate detailed code explanation with RAG (semantic search + LLM)') .option('-k, --top-k <number>', 'Number of search results to use', '10') .option('--dir <path>', 'Project directory (alternative to positional)') .requiredOption('--provider <provider>', 'Provider override for embeddings and LLM: python|server|local|transformers') .option('--server-url <url>', 'Server URL (required for server provider)') .option('-m, --model <name>', 'Model override (deprecated: use --embedding-model and --llm-model instead)') .option('--embedding-model <name>', 'Embedding model name for search') .option('--llm-model <name>', 'LLM model name for explanation generation') .option('--max-tokens <number>', 'Maximum tokens for LLM completion (for OpenAI-compatible providers)', '1500') .option('--install-python-deps', 'Automatically install Python dependencies if missing') .action(async (query, projectPath, options) => { try { if (!query || query.trim().length === 0) { throw new Error('RAG query cannot be empty'); } const topK = parseInt(options.topK); if (isNaN(topK) || topK < 1 || topK > 100) { throw new Error('top-k must be a number between 1 and 100'); } let maxTokens = undefined; if (options.maxTokens !== undefined) { const parsed = parseInt(options.maxTokens); if (isNaN(parsed) || parsed < 256 || parsed > 32768) { throw new Error('--max-tokens must be a number between 256 and 32768'); } maxTokens = parsed; } // Validate provider-specific requirements if (options.provider === 'server' && !options.serverUrl) { throw new Error('Server provider requires --server-url'); } const resolvedPath = this.resolveProjectRoot(projectPath, options.dir); const config = this.initConfigAndLogger(resolvedPath); const globalOpts = { dir: options.dir, provider: options.provider, serverUrl: options.serverUrl, model: options.model, embeddingModel: options.embeddingModel, llmModel: options.llmModel, installPythonDeps: !!options.installPythonDeps, }; // Apply explicit CLI configuration const explicitConfig = this.buildExplicitConfig(globalOpts); config.updateConfig(explicitConfig); (0, ui_1.displayCommandHeader)('RAG Explanation', `Query: "${query}" in ${resolvedPath}`); // Initialize search service const searchService = new enhanced_search_service_1.EnhancedSearchService(config); (0, ui_1.displayProgress)('Initializing search service...'); await searchService.initialize(); // Perform semantic search first const searchOptions = { limit: topK, }; (0, ui_1.displayProgress)('Performing semantic search...'); const results = await searchService.semanticSearch(query, searchOptions); (0, ui_1.displayResults)(results, 'Top Results for RAG'); // If chosen provider is python, ensure deps const llmProvider = config.getAIConfig().llmProvider.provider; if (llmProvider === 'python') { if (globalOpts.installPythonDeps) { try { await (0, 
python_dependency_checker_1.ensurePythonDependencies)(true, true); } catch (depError) { console.log(chalk_1.default.yellow('⚠️ Failed to install Python dependencies automatically:'), depError instanceof Error ? depError.message : String(depError)); console.log(chalk_1.default.blue('💡 Try running: npm run setup-python')); throw depError; } } else { const ready = await (0, python_dependency_checker_1.isPythonEnvironmentReady)(); if (!ready) { console.log(chalk_1.default.yellow('⚠️ Python dependencies are not available for RAG feature')); console.log(chalk_1.default.blue('💡 Run with --install-python-deps to install automatically, or:')); console.log(chalk_1.default.cyan(' npm run setup-python')); throw new Error('Python dependencies required for RAG feature'); } } } const adapted = (0, llm_rag_1.adaptSearchResults)(results); (0, ui_1.displayProgress)('Generating explanation with LLM...'); const ragResponse = await (0, llm_rag_1.generateRAGExplanation)(query, adapted, { model: options.model, maxTokens }); (0, ui_1.displayRAGExplanation)(query, ragResponse); // Exit successfully to prevent hanging await this.exitSuccess(); } catch (error) { this.handleError(error, 'RAG failed'); } }); } } exports.RagCommand = RagCommand;
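
For orientation, the sketch below shows one way this compiled command could be wired into a commander-based CLI and invoked. It is a minimal sketch, not the package's actual entry point: the assumption that BaseCommand's constructor accepts the commander program (the class above only shows this.program being read), the script name, and the model names and server URL in the example invocation are all hypothetical. The rag subcommand's flags themselves are taken from the code above.

// sketch.js — hypothetical wiring; assumes BaseCommand(program) stores the
// commander instance as this.program, which RagCommand.register() relies on.
const { Command } = require('commander');
const { RagCommand } = require('./rag');

const program = new Command();
// Registers the 'rag <query> [project-path]' subcommand and its options.
new RagCommand(program).register();

// Example invocation (flags match the options registered above; the model
// names and server URL are placeholders):
//   node sketch.js rag "how is the index built?" ./my-repo \
//     --provider server --server-url http://localhost:8080 \
//     --embedding-model my-embedding-model --llm-model my-llm-model -k 5
program.parseAsync(process.argv);

Incidentally, the (0, ui_1.displayProgress)(...) call shape that recurs throughout the file is TypeScript's compiled form of an unbound call to an imported function: it behaves like ui_1.displayProgress(...) but with this set to undefined rather than to the module namespace object.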