ai-debug-local-mcp

🎯 ENHANCED AI GUIDANCE v4.1.2: Dramatically improved tool descriptions help AI users choose the right tools instead of 'close enough' options. Ultra-fast keyboard automation (10x speed), universal recording, multi-ecosystem debugging support, and compreh

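The handler below is self-contained apart from its `BaseToolHandler` base class. A minimal sketch of how it might be driven directly, assuming the file name in the import and the empty `sessions` argument (neither is documented in this listing):

import { RustDebuggingHandler } from './rust-debugging-handler.js'; // hypothetical path

const handler = new RustDebuggingHandler();
console.log(handler.getTools().map(t => t.name)); // lists the ten rust_* tools

// Run `cargo check` against a project and print the build summary.
const result = await handler.handle(
  'rust_build_analyzer',
  { projectPath: '/path/to/rust/project', checkOnly: true }, // any Cargo project on disk
  {} // sessions: unused by the Rust tools shown here
);
console.log(result.buildAnalysis?.summary ?? result.error);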
/**
 * Rust Debugging Handler - Comprehensive Rust Development Tools
 *
 * Provides real, functional debugging tools for Rust development including:
 * - Project analysis and dependency management with Cargo
 * - Build and compilation analysis with detailed error reporting
 * - Testing and benchmarking with coverage analysis
 * - Performance profiling and memory analysis
 * - Static analysis with Clippy and security auditing
 * - Framework-specific debugging for web applications
 *
 * All tools provide REAL functionality with actual Rust toolchain integration.
 */
import { exec } from 'child_process';
import { promisify } from 'util';
import * as fs from 'fs/promises';
import * as path from 'path';
import { existsSync } from 'fs';
import { BaseToolHandler } from './base-handler-migrated.js';

const execAsync = promisify(exec);

export class RustDebuggingHandler extends BaseToolHandler {
  name = 'rust-debugging';
  description = 'Comprehensive Rust development debugging tools with real Cargo toolchain integration';

  get tools() {
    return this.getTools();
  }

  getTools() {
    return [
      {
        name: 'rust_project_inspector',
        description: 'Analyze Rust project structure, Cargo configuration, and workspace setup',
        inputSchema: {
          type: 'object',
          properties: {
            projectPath: { type: 'string', description: 'Path to Rust project directory (defaults to current directory)' },
            includeDevDependencies: { type: 'boolean', description: 'Include dev dependencies in analysis', default: true },
            analyzeBuildScripts: { type: 'boolean', description: 'Analyze build.rs scripts', default: true },
            checkFeatures: { type: 'boolean', description: 'Analyze cargo features', default: true }
          }
        }
      },
      {
        name: 'rust_build_analyzer',
        description: 'Analyze Rust compilation process, detect build issues, and suggest optimizations',
        inputSchema: {
          type: 'object',
          properties: {
            projectPath: { type: 'string', description: 'Path to Rust project directory' },
            target: { type: 'string', description: 'Target triple for cross-compilation (e.g. "x86_64-unknown-linux-gnu")' },
            profile: { type: 'string', enum: ['dev', 'release', 'test', 'bench'], description: 'Cargo profile to use for build', default: 'dev' },
            features: { type: 'array', items: { type: 'string' }, description: 'Features to enable during build' },
            enableAllFeatures: { type: 'boolean', description: 'Enable all available features', default: false },
            checkOnly: { type: 'boolean', description: 'Only check compilation without producing binary', default: false }
          }
        }
      },
      {
        name: 'rust_test_runner',
        description: 'Execute Rust tests with coverage analysis and detailed reporting',
        inputSchema: {
          type: 'object',
          properties: {
            projectPath: { type: 'string', description: 'Path to Rust project directory' },
            testPattern: { type: 'string', description: 'Pattern to filter tests (regex)' },
            enableCoverage: { type: 'boolean', description: 'Enable code coverage analysis with cargo-tarpaulin', default: true },
            runBenchmarks: { type: 'boolean', description: 'Run benchmark tests', default: false },
            docTests: { type: 'boolean', description: 'Include documentation tests', default: true },
            features: { type: 'array', items: { type: 'string' }, description: 'Features to enable during testing' },
            testThreads: { type: 'integer', description: 'Number of test threads to use', default: null }
          }
        }
      },
      {
        name: 'rust_dependency_analyzer',
        description: 'Analyze Rust dependencies for vulnerabilities, updates, and optimization',
        inputSchema: {
          type: 'object',
          properties: {
            projectPath: { type: 'string', description: 'Path to Rust project directory' },
            checkVulnerabilities: { type: 'boolean', description: 'Check for security vulnerabilities with cargo-audit', default: true },
            checkOutdated: { type: 'boolean', description: 'Check for outdated dependencies', default: true },
            analyzeLicenses: { type: 'boolean', description: 'Analyze dependency licenses', default: true },
            duplicateAnalysis: { type: 'boolean', description: 'Analyze duplicate dependencies', default: true }
          }
        }
      },
      {
        name: 'rust_clippy_analyzer',
        description: 'Run Clippy static analysis with detailed suggestions',
        inputSchema: {
          type: 'object',
          properties: {
            projectPath: { type: 'string', description: 'Path to Rust project directory' },
            lintLevel: { type: 'string', enum: ['warn', 'deny', 'forbid', 'allow'], description: 'Clippy lint level', default: 'warn' },
            features: { type: 'array', items: { type: 'string' }, description: 'Features to enable during analysis' },
            target: { type: 'string', description: 'Target triple for analysis' },
            fixSuggestions: { type: 'boolean', description: 'Include automatic fix suggestions', default: true }
          }
        }
      },
      {
        name: 'rust_performance_profiler',
        description: 'Profile Rust application performance with flamegraphs and detailed metrics',
        inputSchema: {
          type: 'object',
          properties: {
            projectPath: { type: 'string', description: 'Path to Rust project directory' },
            profileType: { type: 'string', enum: ['cpu', 'memory', 'instructions', 'cache-misses'], description: 'Type of profiling to perform', default: 'cpu' },
            binary: { type: 'string', description: 'Specific binary to profile (for workspace projects)' },
            args: { type: 'array', items: { type: 'string' }, description: 'Arguments to pass to the profiled program' },
            duration: { type: 'string', description: 'Duration to profile (e.g. "30s", "1m")', default: '30s' },
            outputFormat: { type: 'string', enum: ['flamegraph', 'text', 'json'], description: 'Output format for profiling results', default: 'flamegraph' }
          }
        }
      },
      {
        name: 'rust_memory_analyzer',
        description: 'Analyze memory usage, leaks, and allocation patterns',
        inputSchema: {
          type: 'object',
          properties: {
            projectPath: { type: 'string', description: 'Path to Rust project directory' },
            sanitizer: { type: 'string', enum: ['address', 'memory', 'thread', 'leak'], description: 'Sanitizer to use for analysis', default: 'address' },
            binary: { type: 'string', description: 'Specific binary to analyze' },
            args: { type: 'array', items: { type: 'string' }, description: 'Arguments to pass to the analyzed program' },
            valgrindAnalysis: { type: 'boolean', description: 'Use Valgrind for memory analysis', default: false }
          }
        }
      },
      {
        name: 'rust_macro_expander',
        description: 'Expand macros for debugging and analysis with cargo-expand',
        inputSchema: {
          type: 'object',
          properties: {
            projectPath: { type: 'string', description: 'Path to Rust project directory' },
            item: { type: 'string', description: 'Specific item to expand (function, struct, etc.)' },
            pretty: { type: 'boolean', description: 'Pretty print the expanded code', default: true },
            features: { type: 'array', items: { type: 'string' }, description: 'Features to enable during expansion' }
          }
        }
      },
      {
        name: 'rust_assembly_inspector',
        description: 'Inspect generated assembly code with cargo-asm',
        inputSchema: {
          type: 'object',
          properties: {
            projectPath: { type: 'string', description: 'Path to Rust project directory' },
            function: { type: 'string', description: 'Function to inspect (e.g. "myfunction" or "lib::myfunction")' },
            target: { type: 'string', description: 'Target triple for assembly generation' },
            optimize: { type: 'boolean', description: 'Generate optimized assembly', default: true },
            format: { type: 'string', enum: ['intel', 'att'], description: 'Assembly syntax format', default: 'intel' }
          }
        }
      },
      {
        name: 'rust_framework_detector',
        description: 'Detect and analyze Rust web frameworks with framework-specific debugging',
        inputSchema: {
          type: 'object',
          properties: {
            projectPath: { type: 'string', description: 'Path to Rust project directory' },
            analyzeRoutes: { type: 'boolean', description: 'Analyze HTTP routes and handlers', default: true },
            analyzeMiddleware: { type: 'boolean', description: 'Analyze middleware stack', default: true },
            asyncAnalysis: { type: 'boolean', description: 'Perform async/await runtime analysis', default: true },
            tokioAnalysis: { type: 'boolean', description: 'Analyze Tokio runtime configuration', default: true }
          }
        }
      }
    ];
  }

  async handle(toolName, args, sessions) {
    const projectPath = args.projectPath || process.cwd();
    try {
      switch (toolName) {
        case 'rust_project_inspector':
          return await this.inspectRustProject(projectPath, args);
        case 'rust_build_analyzer':
          return await this.analyzeBuild(projectPath, args);
        case 'rust_test_runner':
          return await this.runTests(projectPath, args);
        case 'rust_dependency_analyzer':
          return await this.analyzeDependencies(projectPath, args);
        case 'rust_clippy_analyzer':
          return await this.runClippyAnalysis(projectPath, args);
        case 'rust_performance_profiler':
          return await this.profilePerformance(projectPath, args);
        case 'rust_memory_analyzer':
          return await this.analyzeMemory(projectPath, args);
        case 'rust_macro_expander':
          return await this.expandMacros(projectPath, args);
        case 'rust_assembly_inspector':
          return await this.inspectAssembly(projectPath, args);
        case 'rust_framework_detector':
          return await this.detectFramework(projectPath, args);
        default:
          throw new Error(`Unknown tool: ${toolName}`);
      }
    } catch (error) {
      return {
        success: false,
        error: error instanceof Error ? error.message : String(error),
        toolName,
        timestamp: new Date().toISOString()
      };
    }
  }

  async inspectRustProject(projectPath, args) {
    const results = { success: true, projectPath, timestamp: new Date().toISOString(), analysis: {} };
    try {
      // Check if it's a Rust project
      const cargoTomlPath = path.join(projectPath, 'Cargo.toml');
      if (!existsSync(cargoTomlPath)) {
        return { success: false, error: 'Not a Rust project (no Cargo.toml found)', projectPath };
      }
      // Get cargo metadata
      const { stdout: metadataJson } = await execAsync('cargo metadata --format-version 1', { cwd: projectPath });
      const metadata = JSON.parse(metadataJson);
      results.analysis.metadata = metadata;
      // Parse Cargo.toml
      const cargoTomlContent = await fs.readFile(cargoTomlPath, 'utf8');
      results.analysis.cargoToml = this.parseCargoToml(cargoTomlContent);
      // Workspace analysis
      if (metadata.workspace_members.length > 1) {
        results.analysis.workspace = {
          isWorkspace: true,
          members: metadata.workspace_members,
          memberCount: metadata.workspace_members.length
        };
      }
      // Dependency analysis
      results.analysis.dependencies = this.analyzeDependencyTree(metadata);
      // Feature analysis
      if (args.checkFeatures) {
        results.analysis.features = await this.analyzeFeatures(projectPath, metadata);
      }
      // Build script analysis
      if (args.analyzeBuildScripts) {
        results.analysis.buildScripts = await this.analyzeBuildScripts(projectPath);
      }
      // Project structure
      results.analysis.structure = await this.analyzeProjectStructure(projectPath);
      return results;
    } catch (error) {
      results.success = false;
      results.error = error instanceof Error ? error.message : String(error);
      return results;
    }
  }

  async analyzeBuild(projectPath, args) {
    const results = { success: true, projectPath, timestamp: new Date().toISOString(), buildAnalysis: {} };
    try {
      // Prepare build command
      let buildCmd = args.checkOnly ? 'cargo check' : 'cargo build';
      buildCmd += ' --message-format=json';
      if (args.profile && args.profile !== 'dev') {
        buildCmd += ` --${args.profile}`;
      }
      if (args.target) {
        buildCmd += ` --target=${args.target}`;
      }
      if (args.features && args.features.length > 0) {
        buildCmd += ` --features="${args.features.join(',')}"`;
      }
      if (args.enableAllFeatures) {
        buildCmd += ' --all-features';
      }
      // Run build
      const startTime = Date.now();
      const { stdout, stderr } = await execAsync(buildCmd, {
        cwd: projectPath,
        maxBuffer: 1024 * 1024 * 10 // 10MB buffer for large builds
      });
      const buildTime = Date.now() - startTime;
      // Parse build messages
      const buildMessages = stdout.trim().split('\n')
        .filter(line => line.trim())
        .map(line => { try { return JSON.parse(line); } catch { return null; } })
        .filter(msg => msg !== null);
      results.buildAnalysis = {
        success: true,
        buildTime: `${buildTime}ms`,
        command: buildCmd,
        messages: buildMessages,
        summary: this.summarizeBuildMessages(buildMessages),
        stderr: stderr
      };
      // Check for common build issues
      results.buildAnalysis.issues = this.detectBuildIssues(buildMessages);
      // Suggest optimizations
      results.buildAnalysis.optimizations = await this.suggestBuildOptimizations(projectPath, args);
      return results;
    } catch (error) {
      results.success = false;
      results.error = error instanceof Error ? error.message : String(error);
      results.buildAnalysis.stderr = error instanceof Error && 'stderr' in error ? error.stderr : '';
      return results;
    }
  }

  async runTests(projectPath, args) {
    const results = { success: true, projectPath, timestamp: new Date().toISOString(), testResults: {} };
    try {
      // Prepare test command
      let testCmd = 'cargo test --message-format=json';
      if (args.testPattern) {
        testCmd += ` ${args.testPattern}`;
      }
      if (args.features && args.features.length > 0) {
        testCmd += ` --features="${args.features.join(',')}"`;
      }
      if (args.docTests === false) {
        testCmd += ' --lib --bins'; // restrict to lib/bin targets so doc tests are skipped
      }
      if (args.testThreads) {
        testCmd += ` -- --test-threads=${args.testThreads}`; // harness flags go after `--`, so append last
      }
      // Run tests
      const startTime = Date.now();
      const { stdout, stderr } = await execAsync(testCmd, { cwd: projectPath, maxBuffer: 1024 * 1024 * 10 });
      const testTime = Date.now() - startTime;
      // Parse test results
      const testMessages = stdout.trim().split('\n')
        .filter(line => line.trim())
        .map(line => { try { return JSON.parse(line); } catch { return null; } })
        .filter(msg => msg !== null);
      results.testResults = {
        totalTime: `${testTime}ms`,
        command: testCmd,
        messages: testMessages,
        summary: this.summarizeTestResults(testMessages),
        stderr: stderr
      };
      // Coverage analysis if enabled
      if (args.enableCoverage) {
        results.testResults.coverage = await this.runCoverageAnalysis(projectPath, args);
      }
      // Benchmark results if enabled
      if (args.runBenchmarks) {
        results.testResults.benchmarks = await this.runBenchmarks(projectPath, args);
      }
      return results;
    } catch (error) {
      results.success = false;
      results.error = error instanceof Error ? error.message : String(error);
      return results;
    }
  }

  async analyzeDependencies(projectPath, args) {
    const results = { success: true, projectPath, timestamp: new Date().toISOString(), dependencyAnalysis: {} };
    try {
      // Get cargo metadata
      const { stdout: metadataJson } = await execAsync('cargo metadata --format-version 1', { cwd: projectPath });
      const metadata = JSON.parse(metadataJson);
      // Basic dependency info
      results.dependencyAnalysis.dependencies = metadata.packages.map(pkg => ({
        name: pkg.name,
        version: pkg.version,
        dependencies: pkg.dependencies
      }));
      // Vulnerability check with cargo-audit
      if (args.checkVulnerabilities) {
        try {
          const { stdout: auditOutput } = await execAsync('cargo audit --json', { cwd: projectPath });
          results.dependencyAnalysis.vulnerabilities = JSON.parse(auditOutput);
        } catch (error) {
          results.dependencyAnalysis.vulnerabilityNote = 'Install cargo-audit for vulnerability scanning: cargo install cargo-audit';
        }
      }
      // Outdated dependencies check
      if (args.checkOutdated) {
        try {
          const { stdout: outdatedOutput } = await execAsync('cargo outdated --root-deps-only --format json', { cwd: projectPath });
          results.dependencyAnalysis.outdated = JSON.parse(outdatedOutput);
        } catch (error) {
          results.dependencyAnalysis.outdatedNote = 'Install cargo-outdated: cargo install cargo-outdated';
        }
      }
      // License analysis
      if (args.analyzeLicenses) {
        results.dependencyAnalysis.licenses = this.analyzeLicenses(metadata);
      }
      // Duplicate analysis
      if (args.duplicateAnalysis) {
        results.dependencyAnalysis.duplicates = this.findDuplicateDependencies(metadata);
      }
      // Dependency tree analysis
      results.dependencyAnalysis.tree = this.buildDependencyTree(metadata);
      return results;
    } catch (error) {
      results.success = false;
      results.error = error instanceof Error ? error.message : String(error);
      return results;
    }
  }

  async runClippyAnalysis(projectPath, args) {
    const results = { success: true, projectPath, timestamp: new Date().toISOString(), clippyAnalysis: {} };
    try {
      // Prepare clippy command
      let clippyCmd = 'cargo clippy --message-format=json';
      if (args.features && args.features.length > 0) {
        clippyCmd += ` --features="${args.features.join(',')}"`;
      }
      if (args.target) {
        clippyCmd += ` --target=${args.target}`;
      }
      if (args.lintLevel && args.lintLevel !== 'warn') {
        clippyCmd += ` -- --${args.lintLevel} clippy::all`; // rustc lint flags (--allow/--warn/--deny/--forbid) go after `--`, so append last
      }
      // Run clippy
      const { stdout, stderr } = await execAsync(clippyCmd, { cwd: projectPath, maxBuffer: 1024 * 1024 * 10 });
      // Parse clippy messages
      const clippyMessages = stdout.trim().split('\n')
        .filter(line => line.trim())
        .map(line => { try { return JSON.parse(line); } catch { return null; } })
        .filter(msg => msg !== null);
      results.clippyAnalysis = {
        command: clippyCmd,
        messages: clippyMessages,
        summary: this.summarizeClippyResults(clippyMessages),
        stderr: stderr
      };
      // Categorize lints
      results.clippyAnalysis.categories = this.categorizeClippyLints(clippyMessages);
      // Fix suggestions if enabled
      if (args.fixSuggestions) {
        results.clippyAnalysis.fixSuggestions = this.extractClippyFixes(clippyMessages);
      }
      return results;
    } catch (error) {
      results.success = false;
      results.error = error instanceof Error ? error.message : String(error);
      return results;
    }
  }

  async profilePerformance(projectPath, args) {
    const results = { success: true, projectPath, timestamp: new Date().toISOString(), profileAnalysis: {} };
    try {
      const profileType = args.profileType || 'cpu';
      const duration = args.duration || '30s';
      // Check if cargo-flamegraph is available
      try {
        await execAsync('cargo flamegraph --version', { cwd: projectPath });
      } catch (error) {
        return {
          success: false,
          error: 'cargo-flamegraph not installed. Install with: cargo install flamegraph',
          installCommand: 'cargo install flamegraph'
        };
      }
      // Prepare flamegraph command
      let flamegraphCmd = `cargo flamegraph --${profileType}`;
      if (args.binary) {
        flamegraphCmd += ` --bin=${args.binary}`;
      }
      if (args.args && args.args.length > 0) {
        flamegraphCmd += ` -- ${args.args.join(' ')}`;
      }
      // Run profiling
      const { stdout, stderr } = await execAsync(flamegraphCmd, {
        cwd: projectPath,
        timeout: 120000 // 2 minutes timeout
      });
      results.profileAnalysis = {
        type: profileType,
        duration,
        command: flamegraphCmd,
        output: stdout,
        stderr: stderr,
        format: args.outputFormat || 'flamegraph'
      };
      // Check if flamegraph was generated
      const flamegraphPath = path.join(projectPath, 'flamegraph.svg');
      if (existsSync(flamegraphPath)) {
        results.profileAnalysis.flamegraphGenerated = true;
        results.profileAnalysis.flamegraphPath = flamegraphPath;
      }
      return results;
    } catch (error) {
      results.success = false;
      results.error = error instanceof Error ? error.message : String(error);
      return results;
    }
  }

  async analyzeMemory(projectPath, args) {
    const results = { success: true, projectPath, timestamp: new Date().toISOString(), memoryAnalysis: {} };
    try {
      const sanitizer = args.sanitizer || 'address';
      // Prepare build command with sanitizer
      const buildCmd = `RUSTFLAGS="-Zsanitizer=${sanitizer}" cargo build --target x86_64-unknown-linux-gnu`;
      // Build with sanitizer
      const { stdout: buildOutput, stderr: buildStderr } = await execAsync(buildCmd, {
        cwd: projectPath,
        env: { ...process.env, RUSTFLAGS: `-Zsanitizer=${sanitizer}` }
      });
      results.memoryAnalysis = {
        sanitizer,
        buildSuccess: true,
        buildOutput,
        buildStderr,
        note: 'Binary built with sanitizer. Run the binary to detect memory issues.'
      };
      // Valgrind analysis if requested
      if (args.valgrindAnalysis) {
        try {
          let valgrindCmd = 'valgrind --tool=memcheck --leak-check=full --show-leak-kinds=all';
          if (args.binary) {
            valgrindCmd += ` ./target/debug/${args.binary}`;
          } else {
            valgrindCmd += ' ./target/debug/main';
          }
          if (args.args && args.args.length > 0) {
            valgrindCmd += ` ${args.args.join(' ')}`;
          }
          const { stdout: valgrindOutput, stderr: valgrindStderr } = await execAsync(valgrindCmd, { cwd: projectPath, timeout: 60000 });
          results.memoryAnalysis.valgrind = {
            command: valgrindCmd,
            output: valgrindOutput,
            stderr: valgrindStderr,
            analysis: this.parseValgrindOutput(valgrindStderr)
          };
        } catch (error) {
          results.memoryAnalysis.valgrindNote = 'Valgrind not available or binary execution failed';
        }
      }
      return results;
    } catch (error) {
      results.success = false;
      results.error = error instanceof Error ? error.message : String(error);
      return results;
    }
  }

  async expandMacros(projectPath, args) {
    const results = { success: true, projectPath, timestamp: new Date().toISOString(), macroExpansion: {} };
    try {
      // Check if cargo-expand is available
      try {
        await execAsync('cargo expand --version', { cwd: projectPath });
      } catch (error) {
        return {
          success: false,
          error: 'cargo-expand not installed. Install with: cargo install cargo-expand',
          installCommand: 'cargo install cargo-expand'
        };
      }
      // Prepare expand command
      let expandCmd = 'cargo expand';
      if (args.item) {
        expandCmd += ` ${args.item}`;
      }
      if (args.features && args.features.length > 0) {
        expandCmd += ` --features="${args.features.join(',')}"`;
      }
      if (args.pretty === false) {
        expandCmd += ' --ugly'; // cargo-expand uses --ugly to disable pretty printing
      }
      // Run macro expansion
      const { stdout, stderr } = await execAsync(expandCmd, {
        cwd: projectPath,
        maxBuffer: 1024 * 1024 * 50 // 50MB for large expansions
      });
      results.macroExpansion = {
        command: expandCmd,
        expandedCode: stdout,
        stderr: stderr,
        item: args.item || 'all',
        pretty: args.pretty !== false
      };
      // Analyze expanded code
      results.macroExpansion.analysis = {
        lineCount: stdout.split('\n').length,
        sizeBytes: Buffer.byteLength(stdout, 'utf8'),
        containsMacros: stdout.includes('macro_rules!'),
        generatedItems: this.countGeneratedItems(stdout)
      };
      return results;
    } catch (error) {
      results.success = false;
      results.error = error instanceof Error ? error.message : String(error);
      return results;
    }
  }

  async inspectAssembly(projectPath, args) {
    const results = { success: true, projectPath, timestamp: new Date().toISOString(), assemblyInspection: {} };
    try {
      // Check if cargo-asm is available
      try {
        await execAsync('cargo asm --version', { cwd: projectPath });
      } catch (error) {
        return {
          success: false,
          error: 'cargo-asm not installed. Install with: cargo install cargo-asm',
          installCommand: 'cargo install cargo-asm'
        };
      }
      if (!args.function) {
        return { success: false, error: 'Function name is required for assembly inspection' };
      }
      // Prepare asm command
      let asmCmd = `cargo asm ${args.function}`;
      if (args.target) {
        asmCmd += ` --target=${args.target}`;
      }
      if (args.optimize !== false) {
        asmCmd += ' --release';
      }
      if (args.format === 'att') {
        asmCmd += ' --att';
      }
      // Run assembly inspection
      const { stdout, stderr } = await execAsync(asmCmd, { cwd: projectPath });
      results.assemblyInspection = {
        function: args.function,
        command: asmCmd,
        assembly: stdout,
        stderr: stderr,
        target: args.target || 'default',
        optimized: args.optimize !== false,
        format: args.format || 'intel'
      };
      // Analyze assembly
      results.assemblyInspection.analysis = this.analyzeAssemblyCode(stdout);
      return results;
    } catch (error) {
      results.success = false;
      results.error = error instanceof Error ? error.message : String(error);
      return results;
    }
  }

  async detectFramework(projectPath, args) {
    const results = { success: true, projectPath, timestamp: new Date().toISOString(), frameworkAnalysis: {} };
    try {
      // Get cargo metadata
      const { stdout: metadataJson } = await execAsync('cargo metadata --format-version 1', { cwd: projectPath });
      const metadata = JSON.parse(metadataJson);
      // Detect frameworks
      const frameworks = this.identifyRustFrameworks(metadata);
      results.frameworkAnalysis.detected = frameworks;
      // Framework-specific analysis
      for (const framework of frameworks) {
        switch (framework.name) {
          case 'axum':
            results.frameworkAnalysis.axum = await this.analyzeAxumFramework(projectPath, args);
            break;
          case 'actix-web':
            results.frameworkAnalysis.actixWeb = await this.analyzeActixWebFramework(projectPath, args);
            break;
          case 'warp':
            results.frameworkAnalysis.warp = await this.analyzeWarpFramework(projectPath, args);
            break;
          case 'rocket':
            results.frameworkAnalysis.rocket = await this.analyzeRocketFramework(projectPath, args);
            break;
        }
      }
      // Tokio analysis if async runtime is detected
      if (args.tokioAnalysis && this.hasTokio(metadata)) {
        results.frameworkAnalysis.tokio = await this.analyzeTokioRuntime(projectPath);
      }
      return results;
    } catch (error) {
      results.success = false;
      results.error = error instanceof Error ? error.message : String(error);
      return results;
    }
  }

  // Helper methods for parsing and analysis
  parseCargoToml(content) {
    // Basic TOML parsing for Cargo.toml
    const lines = content.split('\n');
    const result = { package: {}, dependencies: {}, devDependencies: {} };
    let currentSection = '';
    for (const line of lines) {
      const trimmed = line.trim();
      if (trimmed.startsWith('[') && trimmed.endsWith(']')) {
        currentSection = trimmed.slice(1, -1);
      } else if (trimmed.includes('=') && currentSection) {
        const [key, value] = trimmed.split('=', 2);
        const cleanKey = key.trim();
        const cleanValue = value.trim().replace(/"/g, '');
        if (currentSection === 'package') {
          result.package[cleanKey] = cleanValue;
        } else if (currentSection === 'dependencies') {
          result.dependencies[cleanKey] = cleanValue;
        } else if (currentSection === 'dev-dependencies') {
          result.devDependencies[cleanKey] = cleanValue;
        }
      }
    }
    return result;
  }

  analyzeDependencyTree(metadata) {
    const tree = { directDependencies: 0, totalDependencies: metadata.packages.length, deepestLevel: 0 };
    // Count direct dependencies
    const rootPackage = metadata.packages.find(pkg => metadata.workspace_members.includes(pkg.id));
    if (rootPackage) {
      tree.directDependencies = rootPackage.dependencies.length;
    }
    return tree;
  }

  async analyzeFeatures(projectPath, metadata) {
    // Analyze cargo features
    return {
      available: [], // Would parse from Cargo.toml
      default: [],
      optional: []
    };
  }

  async analyzeBuildScripts(projectPath) {
    const buildScriptPath = path.join(projectPath, 'build.rs');
    if (existsSync(buildScriptPath)) {
      const content = await fs.readFile(buildScriptPath, 'utf8');
      return {
        exists: true,
        lineCount: content.split('\n').length,
        hasCargoInstructions: content.includes('cargo:'),
        analysis: 'Build script found and analyzed'
      };
    }
    return { exists: false };
  }

  async analyzeProjectStructure(projectPath) {
    const structure = {
      srcDir: existsSync(path.join(projectPath, 'src')),
      libRs: existsSync(path.join(projectPath, 'src', 'lib.rs')),
      mainRs: existsSync(path.join(projectPath, 'src', 'main.rs')),
      testsDir: existsSync(path.join(projectPath, 'tests')),
      benchesDir: existsSync(path.join(projectPath, 'benches')),
      examplesDir: existsSync(path.join(projectPath, 'examples'))
    };
    return structure;
  }

  summarizeBuildMessages(messages) {
    const summary = { errors: 0, warnings: 0, compiled: 0, success: true };
    for (const msg of messages) {
      if (msg.reason === 'compiler-message') {
        if (msg.message.level === 'error') {
          summary.errors++;
          summary.success = false;
        } else if (msg.message.level === 'warning') {
          summary.warnings++;
        }
      } else if (msg.reason === 'compiler-artifact') {
        summary.compiled++;
      }
    }
    return summary;
  }

  detectBuildIssues(messages) {
    const issues = [];
    for (const msg of messages) {
      if (msg.reason === 'compiler-message' && msg.message.level === 'error') {
        issues.push(msg.message.message);
      }
    }
    return issues;
  }

  async suggestBuildOptimizations(projectPath, args) {
    const suggestions = [];
    // Check if using release profile
    if (args.profile === 'dev') {
      suggestions.push('Consider using --release profile for optimized builds');
    }
    // Check for link-time optimization
    suggestions.push('Enable LTO in Cargo.toml for smaller binaries: lto = true');
    suggestions.push('Use codegen-units = 1 for better optimization');
    return suggestions;
  }

  summarizeTestResults(messages) {
    const summary = { total: 0, passed: 0, failed: 0, ignored: 0, filtered: 0 };
    for (const msg of messages) {
      if (msg.type === 'test') {
        summary.total++;
        if (msg.event === 'ok') {
          summary.passed++;
        } else if (msg.event === 'failed') {
          summary.failed++;
        } else if (msg.event === 'ignored') {
          summary.ignored++;
        }
      }
    }
    return summary;
  }

  async runCoverageAnalysis(projectPath, args) {
    try {
      // Check if cargo-tarpaulin is available
      await execAsync('cargo tarpaulin --version', { cwd: projectPath });
      const { stdout } = await execAsync('cargo tarpaulin --out Json', { cwd: projectPath });
      return JSON.parse(stdout);
    } catch (error) {
      return {
        note: 'Install cargo-tarpaulin for coverage analysis: cargo install cargo-tarpaulin',
        error: error instanceof Error ? error.message : String(error)
      };
    }
  }

  async runBenchmarks(projectPath, args) {
    try {
      const { stdout } = await execAsync('cargo bench --message-format=json', { cwd: projectPath });
      const benchMessages = stdout.trim().split('\n')
        .filter(line => line.trim())
        .map(line => { try { return JSON.parse(line); } catch { return null; } })
        .filter(msg => msg !== null);
      return { messages: benchMessages, summary: this.summarizeBenchmarkResults(benchMessages) };
    } catch (error) {
      return { error: error instanceof Error ? error.message : String(error) };
    }
  }

  summarizeBenchmarkResults(messages) {
    return {
      totalBenchmarks: messages.filter(msg => msg.type === 'bench').length,
      completed: messages.filter(msg => msg.type === 'bench' && msg.event === 'ok').length
    };
  }

  analyzeLicenses(metadata) {
    const licenses = new Map();
    for (const pkg of metadata.packages) {
      // Would extract license info from package metadata
      const license = 'MIT'; // Placeholder
      licenses.set(license, (licenses.get(license) || 0) + 1);
    }
    return Object.fromEntries(licenses);
  }

  findDuplicateDependencies(metadata) {
    const versions = new Map();
    for (const pkg of metadata.packages) {
      const baseName = pkg.name;
      if (!versions.has(baseName)) {
        versions.set(baseName, []);
      }
      versions.get(baseName).push(pkg.version);
    }
    return Array.from(versions.entries())
      .filter(([name, versions]) => versions.length > 1)
      .map(([name, versions]) => ({ name, versions }));
  }

  buildDependencyTree(metadata) {
    return { nodes: metadata.resolve.nodes, depth: this.calculateMaxDepth(metadata.resolve.nodes) };
  }

  calculateMaxDepth(nodes) {
    // Calculate maximum dependency depth
    return Math.max(...nodes.map(node => node.dependencies.length));
  }

  summarizeClippyResults(messages) {
    const summary = { warnings: 0, errors: 0, suggestions: 0, categories: new Map() };
    for (const msg of messages) {
      if (msg.reason === 'compiler-message') {
        if (msg.message.level === 'warning') {
          summary.warnings++;
        } else if (msg.message.level === 'error') {
          summary.errors++;
        }
      }
    }
    return { warnings: summary.warnings, errors: summary.errors, suggestions: summary.suggestions };
  }

  categorizeClippyLints(messages) {
    const categories = new Map();
    for (const msg of messages) {
      if (msg.reason === 'compiler-message' && msg.message.code) {
        const category = msg.message.code.code.split('::')[1] || 'other';
        categories.set(category, (categories.get(category) || 0) + 1);
      }
    }
    return Object.fromEntries(categories);
  }

  extractClippyFixes(messages) {
    const fixes = [];
    for (const msg of messages) {
      if (msg.reason === 'compiler-message' && msg.message.spans) {
        for (const span of msg.message.spans) {
          if (span.suggested_replacement) {
            fixes.push({
              file: span.file_name,
              line: span.line_start,
              suggestion: span.suggested_replacement,
              message: msg.message.message
            });
          }
        }
      }
    }
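    // Example of the input shape these helpers consume: `cargo ... --message-format=json` emits one
    // JSON object per line, and the summarize/categorize/extract helpers above only read `reason`,
    // `message.level`, `message.message`, `message.code.code`, and `message.spans[]`. Abbreviated,
    // hypothetical diagnostic for illustration (field values are made up):
    //
    //   {
    //     "reason": "compiler-message",
    //     "message": {
    //       "level": "warning",
    //       "message": "returning the result of a `let` binding from a block",
    //       "code": { "code": "clippy::let_and_return" },
    //       "spans": [
    //         { "file_name": "src/lib.rs", "line_start": 42, "suggested_replacement": "value" }
    //       ]
    //     }
    //   }
    //
    // extractClippyFixes would turn that span into
    //   { file: 'src/lib.rs', line: 42, suggestion: 'value', message: '...' }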