
@skyramp/mcp


Skyramp MCP (Model Context Protocol) Server - AI-powered test generation and execution

200 lines (195 loc) 8.85 kB
import { z } from "zod";
import { TestExecutionService } from "../../services/TestExecutionService.js";
import { logger } from "../../utils/logger.js";
import { StateManager } from "../../utils/AnalysisStateManager.js";
import * as path from "path";
import * as fs from "fs";
import { AnalyticsService } from "../../services/AnalyticsService.js";

const TOOL_NAME = "skyramp_execute_tests_batch";

/**
 * Register the batch test execution tool with the MCP server
 *
 * This tool executes multiple Skyramp tests in parallel with controlled concurrency.
 * It can accept test information from discovery results or a manual test list.
 */
export function registerExecuteBatchTestsTool(server) {
    server.registerTool("skyramp_execute_tests_batch", {
        description: `Execute multiple Skyramp tests in parallel with intelligent batching and concurrency control.

**NEXT STEP:** Call \`skyramp_calculate_health_scores\` with all results

**KEY FEATURES:**
• Parallel Execution: Run up to 5 tests simultaneously for 5x speedup
• Batch Processing: Tests processed in controlled batches to manage resources
• Isolated Execution: Each test runs in separate Docker container
• Result Capture: Pass/fail status, duration, errors, warnings, crash detection
• Error Resilient: Failed tests don't stop the batch

**STATE FILE MODE:**
- Provide stateFile from skyramp_analyze_test_drift
- Returns summary + updated stateFile path
- Pass updated stateFile to skyramp_calculate_health_scores

**OUTPUT:** {summary, stateFile, sessionId, stateFileSize, message} with execution results
`,
        inputSchema: {
            stateFile: z
                .string()
                .describe("Path to state file from skyramp_analyze_test_drift (required)"),
            authToken: z
                .string()
                .optional()
                .default("")
                .describe("Authentication token for test execution (e.g., Bearer token). Use empty string if no auth required."),
            timeout: z
                .number()
                .optional()
                .describe("Timeout in milliseconds for each test (default: 300000 = 5 minutes)"),
        },
        _meta: {
            keywords: ["batch execution", "parallel tests", "run multiple tests"],
        },
    }, async (args) => {
        let errorResult;
        try {
            logger.info(`Starting batch test execution`);
            // Load tests from state file
            const stateManager = StateManager.fromStatePath(args.stateFile);
            const stateData = await stateManager.readData();
            const originalTestResults = stateData?.tests || [];
            const fullState = await stateManager.readFullState();
            const repositoryPath = fullState?.metadata.repositoryPath || "";
            if (!originalTestResults || originalTestResults.length === 0) {
                errorResult = {
                    content: [
                        {
                            type: "text",
                            text: JSON.stringify({
                                error: "State file is empty or invalid",
                                stateFile: args.stateFile,
                            }, null, 2),
                        },
                    ],
                    isError: true,
                };
                return errorResult;
            }
            logger.info(`Loaded ${originalTestResults.length} tests from state file: ${args.stateFile}`);
            // Validate repositoryPath
            if (!repositoryPath || typeof repositoryPath !== "string") {
                errorResult = {
                    content: [
                        {
                            type: "text",
                            text: JSON.stringify({
                                error: "repositoryPath not found in state file metadata",
                            }, null, 2),
                        },
                    ],
                    isError: true,
                };
                return errorResult;
            }
            const absoluteWorkspacePath = path.resolve(repositoryPath);
            if (!fs.existsSync(absoluteWorkspacePath)) {
                errorResult = {
                    content: [
                        {
                            type: "text",
                            text: JSON.stringify({
                                error: `Workspace path does not exist: ${absoluteWorkspacePath}`,
                            }, null, 2),
                        },
                    ],
                    isError: true,
                };
                return errorResult;
            }
            const testsToExecute = originalTestResults.map((test) => ({
                testFile: test.testFile,
                language: test.language,
                testType: test.testType,
            }));
            // Prepare test execution options
            const testOptions = testsToExecute.map((test) => ({
                testFile: test.testFile,
                workspacePath: absoluteWorkspacePath,
                language: test.language,
                testType: test.testType,
                token: args.authToken || "",
                timeout: args.timeout,
            }));
            logger.info(`Executing ${testOptions.length} tests in parallel batches (max 5 concurrent)`);
            // Execute tests in parallel batches
            const executionService = new TestExecutionService();
            const executionResult = await executionService.executeBatch(testOptions);
            logger.info(`Batch execution complete: ${executionResult.passed} passed, ` +
                `${executionResult.failed} failed, ${executionResult.crashed} crashed`);
            // Enrich test results with execution data
            const enrichedTests = originalTestResults.map((test) => {
                const execResult = executionResult.results.find((r) => r.testFile === test.testFile);
                if (execResult) {
                    return {
                        ...test,
                        execution: {
                            passed: execResult.passed,
                            duration: execResult.duration,
                            errors: execResult.errors,
                            warnings: execResult.warnings,
                            crashed: execResult.crashed,
                            stdout: execResult.output,
                            stderr: execResult.errors.join("\n"),
                            executionTimestamp: execResult.executedAt,
                        },
                    };
                }
                return test;
            });
            // Save to state file
            await stateManager.writeData({ tests: enrichedTests }, {
                repositoryPath: absoluteWorkspacePath,
                step: "execution",
            });
            const stateSize = await stateManager.getSizeFormatted();
            logger.info(`Saved ${enrichedTests.length} tests with execution data to state file: ${stateManager.getStatePath()} (${stateSize})`);
            const responseData = {
                summary: {
                    totalTests: executionResult.totalTests,
                    passed: executionResult.passed,
                    failed: executionResult.failed,
                    crashed: executionResult.crashed,
                    totalDuration: executionResult.totalDuration,
                    totalDurationSeconds: (executionResult.totalDuration / 1000).toFixed(2),
                },
                stateFile: stateManager.getStatePath(),
                sessionId: stateManager.getSessionId(),
                stateFileSize: stateSize,
                message: `Execution complete. ${executionResult.passed} passed, ${executionResult.failed} failed, ${executionResult.crashed} crashed. Pass stateFile to skyramp_calculate_health_scores.`,
                generatedAt: new Date().toISOString(),
            };
            return {
                content: [
                    {
                        type: "text",
                        text: JSON.stringify(responseData, null, 2),
                    },
                ],
            };
        }
        catch (error) {
            logger.error(`Batch execution failed: ${error.message}`, error);
            errorResult = {
                content: [
                    {
                        type: "text",
                        text: JSON.stringify({
                            error: error.message,
                        }, null, 2),
                    },
                ],
                isError: true,
            };
            return errorResult;
        }
        finally {
            AnalyticsService.pushMCPToolEvent(TOOL_NAME, errorResult, {});
        }
    });
}
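
For reference, here is a minimal sketch of how a client might invoke this tool over MCP. It assumes the standard @modelcontextprotocol/sdk client with a stdio transport; the "npx @skyramp/mcp" launch command and the state-file path are placeholders, not something this file confirms.

// Minimal client sketch (assumptions: @modelcontextprotocol/sdk client API,
// stdio transport, hypothetical launch command and state-file path).
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";

const transport = new StdioClientTransport({
    command: "npx",              // placeholder: however the Skyramp MCP server is actually started
    args: ["-y", "@skyramp/mcp"],
});
const client = new Client({ name: "example-client", version: "1.0.0" });
await client.connect(transport);

// Arguments mirror the inputSchema above: stateFile is required,
// authToken defaults to "", timeout is optional (milliseconds).
const result = await client.callTool({
    name: "skyramp_execute_tests_batch",
    arguments: {
        stateFile: "/tmp/skyramp/state.json",  // hypothetical path produced by skyramp_analyze_test_drift
        authToken: "",
        timeout: 300000,
    },
});

// The tool replies with one text content item containing the JSON
// {summary, stateFile, sessionId, stateFileSize, message, generatedAt}.
console.log(result.content[0].text);

Per the tool description, the stateFile path in that response is what gets passed on to skyramp_calculate_health_scores.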