@gc-mcp/testorchestrator
Version:
MCP Server for Smart Test Orchestration - intelligently determines which tests to run based on code changes
1,524 lines (1,517 loc) • 59.5 kB
JavaScript
import { Server } from '@modelcontextprotocol/sdk/server/index.js';
import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
import { ListToolsRequestSchema, CallToolRequestSchema } from '@modelcontextprotocol/sdk/types.js';
import { z } from 'zod';
// src/tools/index.ts
// src/utils/logger.ts
var TestOrchestratorLogger = class {
isDebugEnabled;
constructor() {
this.isDebugEnabled = process.env.NODE_ENV === "development" || process.env.DEBUG === "true";
}
info(message, context) {
this.log("INFO", message, context);
}
warn(message, context) {
this.log("WARN", message, context);
}
error(message, context) {
this.log("ERROR", message, context);
}
debug(message, context) {
if (this.isDebugEnabled) {
this.log("DEBUG", message, context);
}
}
log(level, message, context) {
const timestamp = new Date().toISOString();
const logEntry = {
timestamp,
level,
message,
...context && { context }
};
// Log to stderr: stdio-based MCP servers reserve stdout for protocol messages.
console.error(JSON.stringify(logEntry));
}
};
var loggerInstance = null;
function getLogger() {
if (!loggerInstance) {
loggerInstance = new TestOrchestratorLogger();
}
return loggerInstance;
}
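// Example (illustrative): getLogger().info("Server started", { port: 3000 })
// emits a single JSON line such as:
// {"timestamp":"2025-01-01T00:00:00.000Z","level":"INFO","message":"Server started","context":{"port":3000}}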
// src/analyzers/test-impact-analyzer.ts
var TestImpactAnalyzer = class {
logger = getLogger();
testMap = new Map();
changeAnalyzer;
riskAssessor;
constructor() {
this.changeAnalyzer = new ChangeAnalyzer();
this.riskAssessor = new RiskAssessor();
}
async analyzeTestImpact(changes) {
this.logger.info("Starting test impact analysis", {
changeCount: changes.length
});
const testMap = await this.buildTestMap();
const changeImpact = await this.changeAnalyzer.analyzeChangeImpact(changes);
const result = {
affectedTests: await this.findAffectedTests(changeImpact, testMap),
riskAssessment: await this.riskAssessor.assessTestRisk(changeImpact),
executionStrategy: await this.determineExecutionStrategy(changeImpact),
coverageGaps: await this.identifyCoverageGaps(changes, testMap)
};
this.logger.info("Test impact analysis completed", {
affectedTests: result.affectedTests.length,
overallRisk: result.riskAssessment.overallRisk,
coverageGaps: result.coverageGaps.length
});
return result;
}
async buildTestMap() {
const testMap = new Map();
try {
const testFiles = await this.discoverTestFiles();
for (const testFile of testFiles) {
const testInfo = await this.extractTestInfo(testFile);
testMap.set(testInfo.id, testInfo);
}
} catch (error) {
this.logger.warn("Could not build complete test map", { error });
}
return testMap;
}
async discoverTestFiles() {
return [
"tests/unit/user.test.ts",
"tests/integration/auth.test.ts",
"tests/e2e/checkout.test.ts",
"tests/performance/load.test.ts",
"tests/security/auth.test.ts"
];
}
async extractTestInfo(testFile) {
const testType = this.determineTestType(testFile);
const services = this.extractServicesFromPath(testFile);
return {
id: testFile.replace(/[^a-zA-Z0-9]/g, "_"),
name: this.extractTestName(testFile),
file: testFile,
type: testType,
criticality: this.determineCriticality(testFile, testType),
estimatedTime: this.estimateTestTime(testType),
dependencies: await this.extractDependencies(testFile),
parallelizable: this.canRunInParallel(testType),
services,
coverage: 0.8, // Placeholder
successRate: 0.95 // Placeholder
};
}
determineTestType(testFile) {
if (testFile.includes("/unit/")) return "unit";
if (testFile.includes("/integration/")) return "integration";
if (testFile.includes("/e2e/")) return "e2e";
if (testFile.includes("/performance/")) return "performance";
if (testFile.includes("/security/")) return "security";
return "unit";
}
extractServicesFromPath(testFile) {
const services = [];
if (testFile.includes("user")) services.push("user-service");
if (testFile.includes("auth")) services.push("auth-service");
if (testFile.includes("checkout")) services.push("payment-service");
return services;
}
extractTestName(testFile) {
const parts = testFile.split("/");
return parts[parts.length - 1].replace(".test.ts", "");
}
determineCriticality(testFile, testType) {
if (testType === "security") return "critical";
if (testType === "e2e") return "high";
if (testType === "integration") return "medium";
return "low";
}
estimateTestTime(testType) {
const timeMap = {
unit: 0.5,
integration: 2,
e2e: 5,
performance: 10,
security: 3
};
return timeMap[testType] || 1;
}
async extractDependencies(testFile) {
return [];
}
canRunInParallel(testType) {
return testType === "unit" || testType === "integration";
}
async findAffectedTests(changeImpact, testMap) {
const affectedTests = new Set();
for (const change of changeImpact.changes || []) {
const relatedTests = await this.findTestsForChange(change, testMap);
for (const test of relatedTests) {
affectedTests.add(test);
}
}
return Array.from(affectedTests);
}
async findTestsForChange(change, testMap) {
const tests = [];
const directTests = await this.findDirectTests(change.file, testMap);
tests.push(...directTests);
const serviceTests = await this.findServiceTests(
change.affectedServices,
testMap
);
tests.push(...serviceTests);
if (change.changeType === "api") {
const integrationTests = await this.findIntegrationTests(change, testMap);
tests.push(...integrationTests);
}
return tests;
}
async findDirectTests(file, testMap) {
const tests = [];
const fileName = file.split("/").pop()?.replace(".ts", "") || "";
for (const test of testMap.values()) {
if (test.file.includes(fileName)) {
tests.push(test);
}
}
return tests;
}
async findServiceTests(services, testMap) {
const tests = [];
for (const test of testMap.values()) {
if (test.services.some((service) => services.includes(service))) {
tests.push(test);
}
}
return tests;
}
async findIntegrationTests(change, testMap) {
const tests = [];
for (const test of testMap.values()) {
if (test.type === "integration" && test.services.some(
(service) => change.affectedServices.includes(service)
)) {
tests.push(test);
}
}
return tests;
}
async determineExecutionStrategy(changeImpact) {
const affectedTests = changeImpact.affectedTests || [];
const totalTime = affectedTests.reduce(
(sum, test) => sum + test.estimatedTime,
0
);
return {
approach: totalTime > 10 ? "parallel" : "sequential",
maxParallel: 4,
criticalPath: affectedTests.filter((t) => t.criticality === "critical").map((t) => t.id),
estimatedTime: totalTime,
resourceRequirements: {
memory: 1024,
cpu: 2,
disk: 100,
network: true,
externalServices: ["database", "redis"]
}
};
}
async identifyCoverageGaps(changes, testMap) {
const gaps = [];
for (const change of changes) {
if (change.type === "added" || change.type === "modified") {
gaps.push({
file: change.file,
lines: change.linesChanged,
coverage: 0.6, // Placeholder
recommendedTests: [`${change.file}.test.ts`],
priority: change.breakingPotential === "high" ? "high" : "medium"
});
}
}
return gaps;
}
};
var ChangeAnalyzer = class {
async analyzeChangeImpact(changes) {
return { changes };
}
};
var RiskAssessor = class {
async assessTestRisk(changeImpact) {
return {
overallRisk: "medium",
riskFactors: [],
mitigationStrategies: [],
recommendedActions: []
};
}
};
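// Usage sketch (illustrative; the change shape mirrors the fields consumed
// above and the DetermineTestScopeInputSchema defined later in this file):
// const analyzer = new TestImpactAnalyzer();
// const impact = await analyzer.analyzeTestImpact([{
//   file: "src/services/auth.ts",
//   type: "modified",
//   linesChanged: [10, 11, 12],
//   changeType: "api",
//   impactScope: "cross_service",
//   breakingPotential: "high",
//   affectedServices: ["auth-service"]
// }]);
// impact.affectedTests, impact.riskAssessment, impact.executionStrategy, and
// impact.coverageGaps then feed test selection and planning.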
// src/analyzers/test-selector.ts
var TestSelector = class {
logger = getLogger();
testRepository;
optimizationEngine;
constructor() {
this.testRepository = new TestRepository();
this.optimizationEngine = new TestOptimizationEngine();
}
async selectOptimalTests(impact, constraints) {
this.logger.info("Starting test selection", {
affectedTests: impact.affectedTests.length,
constraints
});
const candidates = await this.getTestCandidates(impact);
const optimized = await this.optimizationEngine.optimizeForConstraints(
candidates,
constraints
);
const result = {
unitTests: optimized.unit,
integrationTests: optimized.integration,
e2eTests: optimized.e2e,
performanceTests: optimized.performance,
securityTests: optimized.security,
estimatedExecutionTime: optimized.totalTime,
confidenceLevel: optimized.confidence,
riskAssessment: optimized.riskAssessment
};
this.logger.info("Test selection completed", {
totalTests: result.unitTests.length + result.integrationTests.length + result.e2eTests.length + result.performanceTests.length + result.securityTests.length,
estimatedTime: result.estimatedExecutionTime,
confidence: result.confidenceLevel
});
return result;
}
async getTestCandidates(impact) {
const candidates = [];
for (const test of impact.affectedTests) {
candidates.push({
test,
priority: this.calculatePriority(test, impact),
confidence: this.calculateConfidence(test, impact),
estimatedTime: test.estimatedTime
});
}
const riskTests = await this.getRiskBasedTests(impact.riskAssessment);
candidates.push(...riskTests);
const coverageTests = await this.getCoverageGapTests(impact.coverageGaps);
candidates.push(...coverageTests);
return candidates;
}
calculatePriority(test, impact) {
let priority = "low";
if (test.criticality === "critical") priority = "critical";
else if (test.criticality === "high") priority = "high";
else if (impact.riskAssessment.overallRisk === "high") priority = "high";
else if (impact.riskAssessment.overallRisk === "medium")
priority = "medium";
return priority;
}
calculateConfidence(test, impact) {
let confidence = 0.5;
if (test.criticality === "critical") confidence += 0.3;
else if (test.criticality === "high") confidence += 0.2;
confidence += test.successRate * 0.2;
if (impact.riskAssessment.overallRisk === "high") confidence -= 0.1;
else if (impact.riskAssessment.overallRisk === "critical")
confidence -= 0.2;
return Math.max(0, Math.min(1, confidence));
}
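// Worked example: a "high"-criticality test with successRate 0.95 under
// "high" overall risk scores 0.5 + 0.2 + (0.95 * 0.2) - 0.1 = 0.79 before
// clamping to [0, 1].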
async getRiskBasedTests(riskAssessment) {
const candidates = [];
for (const riskFactor of riskAssessment.riskFactors || []) {
if (riskFactor.impact === "high" || riskFactor.impact === "critical") {
const relevantTests = await this.findTestsForRiskFactor(riskFactor);
for (const test of relevantTests) {
candidates.push({
test,
priority: "high",
confidence: 0.8,
estimatedTime: test.estimatedTime
});
}
}
}
return candidates;
}
async findTestsForRiskFactor(riskFactor) {
return [];
}
async getCoverageGapTests(coverageGaps) {
const candidates = [];
for (const gap of coverageGaps) {
if (gap.priority === "high") {
const relevantTests = await this.findTestsForCoverageGap(gap);
for (const test of relevantTests) {
candidates.push({
test,
priority: "medium",
confidence: 0.6,
estimatedTime: test.estimatedTime
});
}
}
}
return candidates;
}
async findTestsForCoverageGap(gap) {
return [];
}
};
var TestRepository = class {
// In a real implementation, this would manage test metadata
};
var TestOptimizationEngine = class {
async optimizeForConstraints(candidates, constraints) {
const unit = [];
const integration = [];
const e2e = [];
const performance = [];
const security = [];
for (const candidate of candidates) {
const testPlan = {
testType: candidate.test.type,
services: candidate.test.services,
testFiles: [candidate.test.file],
priority: candidate.priority,
estimatedTime: candidate.estimatedTime,
dependencies: candidate.test.dependencies,
parallelizable: candidate.test.parallelizable,
confidence: candidate.confidence,
rationale: `Selected based on ${candidate.priority} priority and ${candidate.confidence} confidence`
};
switch (candidate.test.type) {
case "unit":
unit.push(testPlan);
break;
case "integration":
integration.push(testPlan);
break;
case "e2e":
e2e.push(testPlan);
break;
case "performance":
performance.push(testPlan);
break;
case "security":
security.push(testPlan);
break;
}
}
const totalTime = [...unit, ...integration, ...e2e, ...performance, ...security].reduce((sum, t) => sum + t.estimatedTime, 0);
const avgConfidence = candidates.length > 0 ? candidates.reduce((sum, c) => sum + c.confidence, 0) / candidates.length : 0;
return {
unit,
integration,
e2e,
performance,
security,
totalTime,
confidence: avgConfidence,
riskAssessment: {
overallRisk: "medium",
riskFactors: [],
mitigationStrategies: [],
recommendedActions: []
}
};
}
};
// src/analyzers/test-optimization-engine.ts
var TestOptimizationEngine2 = class {
logger = getLogger();
async optimizeForConstraints(candidates, constraints) {
this.logger.info("Starting test optimization", {
candidateCount: candidates.length,
constraints
});
this.parseOptimizationGoals(constraints);
const executionPlan = await this.createExecutionPlan(
candidates,
constraints
);
const parallelGroups = await this.createParallelGroups(
executionPlan,
constraints
);
const result = {
executionOrder: executionPlan,
parallelGroups,
totalEstimatedTime: this.calculateTotalTime(executionPlan),
confidenceScore: this.calculateConfidenceScore(candidates),
riskAssessment: this.assessOverallRisk(candidates),
resourceRequirements: this.calculateResourceRequirements(executionPlan)
};
this.logger.info("Test optimization completed", {
executionSteps: result.executionOrder.length,
parallelGroups: result.parallelGroups.length,
totalTime: result.totalEstimatedTime,
confidence: result.confidenceScore
});
return result;
}
parseOptimizationGoals(constraints) {
const goals = [];
if (constraints.maxExecutionTime < 300) {
goals.push("minimize_time");
}
if (constraints.confidenceThreshold > 0.8) {
goals.push("maximize_confidence");
}
if (constraints.maxMemoryUsage < 4096) {
goals.push("minimize_resources");
}
return goals;
}
async createExecutionPlan(candidates, constraints) {
const steps = [];
const sortedCandidates = this.sortCandidatesByPriority(candidates);
let stepNumber = 1;
let currentTime = 0;
for (const candidate of sortedCandidates) {
if (currentTime + candidate.estimatedTime > constraints.maxExecutionTime) {
this.logger.warn("Skipping test due to time constraints", {
test: candidate.test.name,
estimatedTime: candidate.estimatedTime,
currentTime,
maxTime: constraints.maxExecutionTime
});
break;
}
const testPlan = this.candidateToTestPlan(candidate);
steps.push({
step: stepNumber,
testGroup: `group_${stepNumber++}`,
tests: [testPlan],
dependencies: candidate.test.dependencies,
estimatedTime: candidate.estimatedTime,
parallelizable: candidate.test.parallelizable,
criticalPath: candidate.priority === "critical"
});
currentTime += candidate.estimatedTime;
}
return steps;
}
sortCandidatesByPriority(candidates) {
const priorityOrder = { critical: 0, high: 1, medium: 2, low: 3 };
return [...candidates].sort((a, b) => {
const priorityDiff = priorityOrder[a.priority] - priorityOrder[b.priority];
if (priorityDiff !== 0) return priorityDiff;
return b.confidence - a.confidence;
});
}
candidateToTestPlan(candidate) {
return {
testType: candidate.test.type,
services: candidate.test.services,
testFiles: [candidate.test.file],
priority: candidate.priority,
estimatedTime: candidate.estimatedTime,
dependencies: candidate.test.dependencies,
parallelizable: candidate.test.parallelizable,
confidence: candidate.confidence,
rationale: `Selected based on ${candidate.priority} priority and ${candidate.confidence} confidence`
};
}
async createParallelGroups(executionPlan, constraints) {
const groups = [];
const maxParallel = constraints.maxParallelTests || 4;
for (let i = 0; i < executionPlan.length; i += maxParallel) {
const stepGroup = executionPlan.slice(i, i + maxParallel);
const tests = stepGroup.flatMap((step) => step.tests);
if (tests.length > 0) {
groups.push({
groupId: `parallel_group_${Math.floor(i / maxParallel) + 1}`,
tests,
maxParallel: Math.min(maxParallel, tests.length),
estimatedTime: Math.max(
...stepGroup.map((step) => step.estimatedTime)
),
dependencies: this.extractDependencies(stepGroup),
resourceRequirements: this.calculateGroupResources(tests)
});
}
}
return groups;
}
extractDependencies(stepGroup) {
const dependencies = new Set();
for (const step of stepGroup) {
for (const dep of step.dependencies) {
dependencies.add(dep);
}
}
return Array.from(dependencies);
}
calculateGroupResources(tests) {
let totalMemory = 0;
let totalCpu = 0;
let totalDisk = 0;
const externalServices = new Set();
for (const test of tests) {
switch (test.testType) {
case "unit":
totalMemory += 128;
totalCpu += 0.5;
totalDisk += 10;
break;
case "integration":
totalMemory += 256;
totalCpu += 1;
totalDisk += 50;
externalServices.add("database");
break;
case "e2e":
totalMemory += 512;
totalCpu += 2;
totalDisk += 100;
externalServices.add("database");
externalServices.add("redis");
break;
case "performance":
totalMemory += 1024;
totalCpu += 4;
totalDisk += 200;
externalServices.add("database");
externalServices.add("redis");
externalServices.add("cache");
break;
case "security":
totalMemory += 256;
totalCpu += 1;
totalDisk += 50;
externalServices.add("database");
break;
}
}
return {
memory: totalMemory,
cpu: totalCpu,
disk: totalDisk,
network: externalServices.size > 0,
externalServices: Array.from(externalServices)
};
}
calculateTotalTime(executionPlan) {
return executionPlan.reduce((total, step) => total + step.estimatedTime, 0);
}
calculateConfidenceScore(candidates) {
if (candidates.length === 0) return 0;
const totalConfidence = candidates.reduce(
(sum, candidate) => sum + candidate.confidence,
0
);
return totalConfidence / candidates.length;
}
assessOverallRisk(candidates) {
const criticalTests = candidates.filter(
(c) => c.priority === "critical"
).length;
const highRiskTests = candidates.filter(
(c) => c.priority === "high"
).length;
let overallRisk = "low";
if (criticalTests > 0) overallRisk = "critical";
else if (highRiskTests > 2) overallRisk = "high";
else if (highRiskTests > 0) overallRisk = "medium";
return {
overallRisk,
riskFactors: [],
mitigationStrategies: [],
recommendedActions: []
};
}
calculateResourceRequirements(executionPlan) {
let totalMemory = 0;
let totalCpu = 0;
let totalDisk = 0;
const externalServices = new Set();
for (const step of executionPlan) {
for (const test of step.tests) {
switch (test.testType) {
case "unit":
totalMemory += 128;
totalCpu += 0.5;
totalDisk += 10;
break;
case "integration":
totalMemory += 256;
totalCpu += 1;
totalDisk += 50;
externalServices.add("database");
break;
case "e2e":
totalMemory += 512;
totalCpu += 2;
totalDisk += 100;
externalServices.add("database");
externalServices.add("redis");
break;
case "performance":
totalMemory += 1024;
totalCpu += 4;
totalDisk += 200;
externalServices.add("database");
externalServices.add("redis");
externalServices.add("cache");
break;
case "security":
totalMemory += 256;
totalCpu += 1;
totalDisk += 50;
externalServices.add("database");
break;
}
}
}
return {
memory: totalMemory,
cpu: totalCpu,
disk: totalDisk,
network: externalServices.size > 0,
externalServices: Array.from(externalServices)
};
}
};
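// Usage sketch (illustrative; the constraint fields match how this class
// reads them above: maxExecutionTime, confidenceThreshold, maxMemoryUsage,
// maxParallelTests):
// const engine = new TestOptimizationEngine2();
// const plan = await engine.optimizeForConstraints(candidates, {
//   maxExecutionTime: 300,
//   maxParallelTests: 4,
//   confidenceThreshold: 0.8,
//   maxMemoryUsage: 4096
// });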
// src/analyzers/failure-predictor.ts
var FailurePredictor = class {
logger = getLogger();
historicalData;
patternMatcher;
constructor() {
this.historicalData = this.initializeHistoricalData();
this.patternMatcher = new PatternMatcher();
}
async predictFailures(changes, testHistory) {
this.logger.info("Starting failure prediction", {
changeCount: changes.length
});
const predictions = [];
const history = testHistory || this.historicalData;
for (const change of changes) {
const changePattern = await this.extractChangePattern(change);
const similarFailures = await this.findSimilarFailures(
changePattern,
history
);
if (similarFailures.length > 0) {
const prediction = {
change,
predictedFailures: similarFailures,
confidence: this.calculatePredictionConfidence(similarFailures),
riskFactors: this.identifyRiskFactors(change, similarFailures),
mitigationStrategies: this.suggestMitigationStrategies(similarFailures)
};
predictions.push(prediction);
}
}
this.logger.info("Failure prediction completed", {
predictions: predictions.length,
totalFailures: predictions.reduce(
(sum, p) => sum + p.predictedFailures.length,
0
)
});
return predictions;
}
initializeHistoricalData() {
return {
executions: [],
failures: [],
patterns: [],
trends: []
};
}
async extractChangePattern(change) {
return {
fileType: this.extractFileType(change.file),
changeType: change.changeType,
impactScope: change.impactScope,
breakingPotential: change.breakingPotential,
affectedServices: change.affectedServices,
codeComplexity: await this.analyzeCodeComplexity(change)
};
}
extractFileType(file) {
const extension = file.split(".").pop()?.toLowerCase() || "";
const typeMap = {
ts: "typescript",
js: "javascript",
py: "python",
java: "java",
go: "go",
rs: "rust",
sql: "sql",
json: "json",
yaml: "yaml",
yml: "yaml"
};
return typeMap[extension] || "unknown";
}
async analyzeCodeComplexity(change) {
let complexity = 0;
complexity += Math.min(change.linesChanged.length * 0.1, 1);
if (change.breakingPotential === "high") complexity += 0.3;
else if (change.breakingPotential === "medium") complexity += 0.2;
if (change.impactScope === "cross_service") complexity += 0.2;
else if (change.impactScope === "system_wide") complexity += 0.4;
if (change.changeType === "api") complexity += 0.2;
return Math.min(complexity, 1);
}
async findSimilarFailures(pattern, history) {
const similarFailures = [];
for (const failure of history.failures) {
const similarity = await this.calculatePatternSimilarity(
pattern,
failure.changePattern
);
if (similarity > 0.7) {
similarFailures.push({
...failure,
similarity
});
}
}
return similarFailures.sort((a, b) => b.similarity - a.similarity);
}
async calculatePatternSimilarity(pattern1, pattern2) {
// Weighted feature agreement; the weights sum to 1, so the score already
// lies in [0, 1] and can meaningfully be compared against the 0.7 cutoff.
let similarity = 0;
if (pattern1.fileType === pattern2.fileType) {
similarity += 0.2;
}
if (pattern1.changeType === pattern2.changeType) {
similarity += 0.3;
}
if (pattern1.impactScope === pattern2.impactScope) {
similarity += 0.2;
}
if (pattern1.breakingPotential === pattern2.breakingPotential) {
similarity += 0.2;
}
if (Math.abs(pattern1.codeComplexity - pattern2.codeComplexity) <= 0.2) {
similarity += 0.1;
}
return similarity;
}
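// Worked example: two patterns that agree on changeType (0.3), impactScope
// (0.2), and breakingPotential (0.2), with code complexity within 0.2 (0.1)
// but different file types, score 0.8 and clear the 0.7 similarity cutoff.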
calculatePredictionConfidence(failures) {
if (failures.length === 0) return 0;
const avgSimilarity = failures.reduce((sum, f) => sum + f.similarity, 0) / failures.length;
const countFactor = Math.min(failures.length / 3, 1);
return avgSimilarity * countFactor;
}
identifyRiskFactors(change, failures) {
const riskFactors = [];
const failureTypes = failures.map((f) => f.failureType);
const uniqueFailureTypes = [...new Set(failureTypes)];
for (const failureType of uniqueFailureTypes) {
const count = failureTypes.filter((t) => t === failureType).length;
if (count > 1) {
riskFactors.push(
`Frequent ${failureType} failures with similar changes`
);
}
}
if (change.breakingPotential === "high") {
riskFactors.push("High breaking potential change");
}
if (change.impactScope === "system_wide") {
riskFactors.push("System-wide impact change");
}
if (change.changeType === "api") {
riskFactors.push("API change with potential integration issues");
}
return riskFactors;
}
suggestMitigationStrategies(failures) {
const strategies = [];
const failureTypes = failures.map((f) => f.failureType);
const uniqueFailureTypes = [...new Set(failureTypes)];
for (const failureType of uniqueFailureTypes) {
switch (failureType.toLowerCase()) {
case "timeout":
strategies.push("Increase timeout values for affected tests");
strategies.push("Check for performance regressions");
break;
case "assertion":
strategies.push("Review test assertions for changed functionality");
strategies.push("Update test expectations if behavior changed");
break;
case "integration":
strategies.push("Run integration tests in isolated environment");
strategies.push("Check service dependencies and contracts");
break;
case "data":
strategies.push("Verify test data setup and cleanup");
strategies.push("Check for data migration issues");
break;
default:
strategies.push("Review test setup and dependencies");
strategies.push("Check for environment-specific issues");
}
}
strategies.push("Run tests in staging environment first");
strategies.push("Monitor test execution logs for early warnings");
strategies.push("Have rollback plan ready");
return [...new Set(strategies)];
}
};
var PatternMatcher = class {
// In a real implementation, this would contain sophisticated pattern matching logic
};
var DetermineTestScopeInputSchema = z.object({
changes: z.array(
z.object({
file: z.string(),
type: z.enum(["added", "modified", "deleted"]),
linesChanged: z.array(z.number()),
changeType: z.string(),
impactScope: z.string(),
breakingPotential: z.string(),
affectedServices: z.array(z.string()),
content: z.string().optional(),
previousContent: z.string().optional()
})
),
context: z.object({
task: z.enum(["feature-development", "bug-fixing", "testing", "debugging"]),
affected_services: z.array(z.string()),
environment_type: z.enum(["local", "staging", "testing"])
}),
constraints: z.object({
max_execution_time: z.number().optional(),
available_resources: z.array(z.string()).optional(),
confidence_threshold: z.number().min(0).max(1).optional()
}).optional()
});
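// Validation sketch (illustrative): zod's safeParse validates without
// throwing, which suits tool handlers that report errors as response content:
// const parsed = DetermineTestScopeInputSchema.safeParse(args);
// if (!parsed.success) return { content: [{ type: "text", text: parsed.error.message }] };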
var OptimizeTestExecutionInputSchema = z.object({
test_plan: z.object({
unitTests: z.array(z.any()),
integrationTests: z.array(z.any()),
e2eTests: z.array(z.any()),
performanceTests: z.array(z.any()),
securityTests: z.array(z.any()),
estimatedExecutionTime: z.number(),
confidenceLevel: z.number(),
riskAssessment: z.any()
}),
optimization_goals: z.object({
minimize_time: z.boolean().default(true),
maximize_confidence: z.boolean().default(true),
minimize_resources: z.boolean().default(false)
}).optional(),
execution_constraints: z.object({
max_parallel_tests: z.number().default(4),
max_execution_time: z.number().optional(),
available_memory: z.number().optional(),
cpu_cores: z.number().optional()
}).optional()
});
var PredictTestFailuresInputSchema = z.object({
changes: z.array(
z.object({
file: z.string(),
type: z.enum(["added", "modified", "deleted"]),
linesChanged: z.array(z.number()),
changeType: z.string(),
impactScope: z.string(),
breakingPotential: z.string(),
affectedServices: z.array(z.string()),
content: z.string().optional(),
previousContent: z.string().optional()
})
),
test_history: z.object({
executions: z.array(z.any()),
failures: z.array(z.any()),
patterns: z.array(z.any()),
trends: z.array(z.any())
}).optional(),
confidence_threshold: z.number().min(0).max(1).default(0.7),
failure_patterns: z.array(z.string()).optional()
});
var SuggestTestDataSetupInputSchema = z.object({
changes: z.array(
z.object({
file: z.string(),
type: z.enum(["added", "modified", "deleted"]),
linesChanged: z.array(z.number()),
changeType: z.string(),
impactScope: z.string(),
breakingPotential: z.string(),
affectedServices: z.array(z.string()),
content: z.string().optional(),
previousContent: z.string().optional()
})
),
test_plan: z.object({
unitTests: z.array(z.any()),
integrationTests: z.array(z.any()),
e2eTests: z.array(z.any()),
performanceTests: z.array(z.any()),
securityTests: z.array(z.any()),
estimatedExecutionTime: z.number(),
confidenceLevel: z.number(),
riskAssessment: z.any()
}),
data_constraints: z.object({
max_data_size: z.number().optional(),
data_retention_policy: z.string().optional(),
privacy_requirements: z.array(z.string()).optional()
}).optional()
});
var AnalyzeTestCoverageGapsInputSchema = z.object({
changes: z.array(
z.object({
file: z.string(),
type: z.enum(["added", "modified", "deleted"]),
linesChanged: z.array(z.number()),
changeType: z.string(),
impactScope: z.string(),
breakingPotential: z.string(),
affectedServices: z.array(z.string()),
content: z.string().optional(),
previousContent: z.string().optional()
})
),
current_coverage: z.object({
overall: z.number(),
byFile: z.record(z.number()),
byService: z.record(z.number()),
byType: z.record(z.number())
}),
coverage_threshold: z.number().min(0).max(1).default(0.8),
focus_areas: z.array(z.string()).optional()
});
var TestScopeOutputSchema = z.object({
unitTests: z.array(
z.object({
testType: z.literal("unit"),
services: z.array(z.string()),
testFiles: z.array(z.string()),
priority: z.enum(["critical", "high", "medium", "low"]),
estimatedTime: z.number(),
dependencies: z.array(z.string()),
parallelizable: z.boolean(),
confidence: z.number(),
rationale: z.string()
})
),
integrationTests: z.array(
z.object({
testType: z.literal("integration"),
services: z.array(z.string()),
testFiles: z.array(z.string()),
priority: z.enum(["critical", "high", "medium", "low"]),
estimatedTime: z.number(),
dependencies: z.array(z.string()),
parallelizable: z.boolean(),
confidence: z.number(),
rationale: z.string()
})
),
e2eTests: z.array(
z.object({
testType: z.literal("e2e"),
services: z.array(z.string()),
testFiles: z.array(z.string()),
priority: z.enum(["critical", "high", "medium", "low"]),
estimatedTime: z.number(),
dependencies: z.array(z.string()),
parallelizable: z.boolean(),
confidence: z.number(),
rationale: z.string()
})
),
performanceTests: z.array(
z.object({
testType: z.literal("performance"),
services: z.array(z.string()),
testFiles: z.array(z.string()),
priority: z.enum(["critical", "high", "medium", "low"]),
estimatedTime: z.number(),
dependencies: z.array(z.string()),
parallelizable: z.boolean(),
confidence: z.number(),
rationale: z.string()
})
),
securityTests: z.array(
z.object({
testType: z.literal("security"),
services: z.array(z.string()),
testFiles: z.array(z.string()),
priority: z.enum(["critical", "high", "medium", "low"]),
estimatedTime: z.number(),
dependencies: z.array(z.string()),
parallelizable: z.boolean(),
confidence: z.number(),
rationale: z.string()
})
),
estimatedExecutionTime: z.number(),
confidenceLevel: z.number(),
riskAssessment: z.object({
overallRisk: z.enum(["low", "medium", "high", "critical"]),
riskFactors: z.array(
z.object({
factor: z.string(),
impact: z.enum(["low", "medium", "high", "critical"]),
probability: z.number(),
description: z.string(),
mitigation: z.string()
})
),
mitigationStrategies: z.array(z.string()),
recommendedActions: z.array(z.string())
})
});
var OptimizedTestPlanOutputSchema = z.object({
executionOrder: z.array(
z.object({
step: z.number(),
testGroup: z.string(),
tests: z.array(z.any()),
dependencies: z.array(z.string()),
estimatedTime: z.number(),
parallelizable: z.boolean(),
criticalPath: z.boolean()
})
),
parallelGroups: z.array(
z.object({
groupId: z.string(),
tests: z.array(z.any()),
maxParallel: z.number(),
estimatedTime: z.number(),
dependencies: z.array(z.string()),
resourceRequirements: z.object({
memory: z.number(),
cpu: z.number(),
disk: z.number(),
network: z.boolean(),
externalServices: z.array(z.string())
})
})
),
totalEstimatedTime: z.number(),
confidenceScore: z.number(),
riskAssessment: z.object({
overallRisk: z.enum(["low", "medium", "high", "critical"]),
riskFactors: z.array(
z.object({
factor: z.string(),
impact: z.enum(["low", "medium", "high", "critical"]),
probability: z.number(),
description: z.string(),
mitigation: z.string()
})
),
mitigationStrategies: z.array(z.string()),
recommendedActions: z.array(z.string())
}),
resourceRequirements: z.object({
memory: z.number(),
cpu: z.number(),
disk: z.number(),
network: z.boolean(),
externalServices: z.array(z.string())
})
});
z.object({
predictions: z.array(
z.object({
change: z.object({
file: z.string(),
type: z.enum(["added", "modified", "deleted"]),
linesChanged: z.array(z.number()),
changeType: z.string(),
impactScope: z.string(),
breakingPotential: z.string(),
affectedServices: z.array(z.string())
}),
predictedFailures: z.array(
z.object({
testId: z.string(),
failureType: z.string(),
similarity: z.number(),
timestamp: z.date(),
resolution: z.string()
})
),
confidence: z.number(),
riskFactors: z.array(z.string()),
mitigationStrategies: z.array(z.string())
})
),
overallRisk: z.enum(["low", "medium", "high", "critical"]),
recommendations: z.array(z.string())
});
z.object({
dataTypes: z.array(z.string()),
dataSize: z.number(),
setupStrategy: z.enum(["minimal", "comprehensive", "targeted"]),
privacyRequirements: z.array(z.string()),
retentionPolicy: z.string(),
estimatedSetupTime: z.number(),
setupInstructions: z.array(z.string()),
dataSources: z.array(z.string()),
validationChecks: z.array(z.string())
});
z.object({
currentCoverage: z.number(),
targetCoverage: z.number(),
gaps: z.array(
z.object({
file: z.string(),
lines: z.array(z.number()),
coverage: z.number(),
recommendedTests: z.array(z.string()),
priority: z.enum(["high", "medium", "low"])
})
),
recommendations: z.array(z.string()),
estimatedEffort: z.number(),
priority: z.enum(["high", "medium", "low"]),
improvementPlan: z.array(
z.object({
phase: z.string(),
actions: z.array(z.string()),
estimatedTime: z.number(),
expectedImprovement: z.number()
})
)
});
var logger = getLogger();
var ConfigSchema = z.object({
// Test Configuration
testOptimizationGoals: z.string().default("minimize_time,maximize_confidence"),
maxParallelTests: z.number().default(4),
testConfidenceThreshold: z.number().min(0).max(1).default(0.8),
// Resource Constraints
maxExecutionTime: z.number().default(300),
maxMemoryUsage: z.number().default(8192),
cpuCores: z.number().default(4),
// Failure Prediction
enableFailurePrediction: z.boolean().default(true),
historicalDataRetentionDays: z.number().default(30),
patternSimilarityThreshold: z.number().min(0).max(1).default(0.7),
// Server Configuration
port: z.number().default(3000),
host: z.string().default("localhost"),
logLevel: z.enum(["debug", "info", "warn", "error"]).default("info")
});
function loadConfig() {
try {
const config2 = {
testOptimizationGoals: process.env.TEST_OPTIMIZATION_GOALS || "minimize_time,maximize_confidence",
maxParallelTests: parseInt(process.env.MAX_PARALLEL_TESTS || "4"),
testConfidenceThreshold: parseFloat(
process.env.TEST_CONFIDENCE_THRESHOLD || "0.8"
),
maxExecutionTime: parseInt(process.env.MAX_EXECUTION_TIME || "300"),
maxMemoryUsage: parseInt(process.env.MAX_MEMORY_USAGE || "8192"),
cpuCores: parseInt(process.env.CPU_CORES || "4"),
// Leave undefined when the variable is unset so the schema default (true) applies.
enableFailurePrediction: process.env.ENABLE_FAILURE_PREDICTION ? process.env.ENABLE_FAILURE_PREDICTION === "true" : void 0,
historicalDataRetentionDays: parseInt(
process.env.HISTORICAL_DATA_RETENTION_DAYS || "30"
),
patternSimilarityThreshold: parseFloat(
process.env.PATTERN_SIMILARITY_THRESHOLD || "0.7"
),
port: parseInt(process.env.PORT || "3000"),
host: process.env.HOST || "localhost",
logLevel: process.env.LOG_LEVEL || "info"
};
const validatedConfig = ConfigSchema.parse(config2);
logger.info("Configuration loaded successfully", {
optimizationGoals: validatedConfig.testOptimizationGoals,
maxParallelTests: validatedConfig.maxParallelTests,
confidenceThreshold: validatedConfig.testConfidenceThreshold
});
return validatedConfig;
} catch (error) {
logger.error("Failed to load configuration", { error });
throw new Error(`Configuration validation failed: ${error}`);
}
}
var config = loadConfig();
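// Example invocation (illustrative values; the entry-point filename is an
// assumption):
//   MAX_PARALLEL_TESTS=8 TEST_CONFIDENCE_THRESHOLD=0.9 LOG_LEVEL=debug \
//     node dist/index.js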
// src/tools/index.ts
var SmartTestOrchestratorMcpServer = class {
server;
logger = getLogger();
testImpactAnalyzer;
testSelector;
testOptimizationEngine;
failurePredictor;
constructor() {
this.server = new Server(
{
name: "smart-test-orchestrator",
version: "1.0.0"
},
{
capabilities: {
tools: {}
}
}
);
this.testImpactAnalyzer = new TestImpactAnalyzer();
this.testSelector = new TestSelector();
this.testOptimizationEngine = new TestOptimizationEngine2();
this.failurePredictor = new FailurePredictor();
this.setupToolHandlers();
}
setupToolHandlers() {
this.server.setRequestHandler(ListToolsRequestSchema, async () => {
return {
tools: [
{
name: "determine_test_scope",
description: "Intelligently determine what tests to run based on code changes",
inputSchema: {
type: "object",
properties: {
changes: {
type: "array",
items: { type: "object" },
description: "Array of code changes to analyze"
},
context: {
type: "object",
properties: {
task: {
type: "string",
enum: [
"feature-development",
"bug-fixing",
"testing",
"debugging"
],
description: "Type of development task"
},
affected_services: {
type: "array",
items: { type: "string" },
description: "List of affected services"
},
environment_type: {
type: "string",
enum: ["local", "staging", "testing"],
description: "Environment type"
}
},
required: ["task", "affected_services", "environment_type"]
},
constraints: {
type: "object",
properties: {
max_execution_time: {
type: "number",
description: "Maximum execution time in seconds"
},
available_resources: {
type: "array",
items: { type: "string" },
description: "Available resources"
},
confidence_threshold: {
type: "number",
minimum: 0,
maximum: 1,
description: "Confidence threshold"
}
}
}
},
required: ["changes", "context"]
}
},
{
name: "optimize_test_execution",
description: "Optimize test execution order and parallelization",
inputSchema: {
type: "object",
properties: {
test_plan: {
type: "object",
description: "Test plan from determine_test_scope"
},
optimization_goals: {
type: "object",
properties: {
minimize_time: { type: "boolean", default: true },
maximize_confidence: { type: "boolean", default: true },
minimize_resources: { type: "boolean", default: false }
}
},
execution_constraints: {
type: "object",
properties: {
max_parallel_tests: { type: "number", default: 4 },
max_execution_time: { type: "number" },
available_memory: { type: "number" },
cpu_cores: { type: "number" }
}
}
},
required: ["test_plan"]
}
},
{
name: "predict_test_failures",
description: "Predict which tests are likely to fail based on changes",
inputSchema: {
type: "object",
properties: {
changes: {
type: "array",
items: { type: "object" },
description: "Array of code changes to analyze"
},
test_history: {
type: "object",
description: "Historical test execution data"
},
confidence_threshold: {
type: "number",
minimum: 0,
maximum: 1,
default: 0.7
},
failure_patterns: {
type: "array",
items: { type: "string" },
description: "Known failure patterns to check"
}
},
required: ["changes"]
}
},
{
name: "suggest_test_data_setup",
description: "Suggest optimal test data setup based on changes",
inputSchema: {
type: "object",
properties: {
changes: {
type: "array",
items: { type: "object" },
description: "Array of code changes to analyze"
},
test_plan: {
type: "object",
description: "Test plan to optimize data for"
},
data_constraints: {
type: "object",
properties: {
max_data_size: { type: "number" },
data_retention_policy: { type: "string" },
privacy_requirements: {
type: "array",
items: { type: "string" }
}
}
}
},
required: ["changes", "test_plan"]
}
},
{
name: "analyze_test_coverage_gaps",
description: "Analyze gaps in test coverage for changed code",
inputSchema: {
type: "object",
properties: {
changes: {
type: "array",
items: { type: "object" },
description: "Array of code changes to analyze"
},
current_coverage: {
type: "object",
description: "Current test coverage data"
},
coverage_threshold: {
type: "number",
minimum: 0,
maximum: 1,
default: 0.8
},
focus_areas: {
type: "array",
items: { type: "string" },
description: "Specific areas to focus coverage analysis on"
}
},
required: ["changes", "current_coverage"]
}
}
]
};
});
this.server.setRequestHandler(CallToolRequestSchema, async (request) => {
const { name, arguments: args } = request.params;
try {
switch (name) {
case "determine_test_scope":
return await this.handleDetermineTestScope(args);
case "optimize_test_execution":
return await this.handleOptimizeTestExecution(args);
case "predict_test_failures":
return await this.handlePredictTestFailures(args);
case "suggest_test_data_setup":
return await this.handleSuggestTestDataSetup(args);
case "analyze_test_coverage_gaps":
return await this.handleAnalyzeTestCoverageGaps(args);
default:
throw new Error(`Unknown tool: ${name}`);
}
} catch (error) {
this.logger.error("Tool execution failed", { tool: name, error });
return {
content: [
{
type: "text",
text: `Error executing ${name}: ${error instanceof Error ? error.message : "Unknown error"}`
}
]
};
}
});
}
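// Illustrative MCP request (JSON-RPC "tools/call"); the arguments shape
// follows DetermineTestScopeInputSchema above:
// {
//   "method": "tools/call",
//   "params": {
//     "name": "determine_test_scope",
//     "arguments": {
//       "changes": [{ "file": "src/services/auth.ts", "type": "modified",
//         "linesChanged": [10], "changeType": "api", "impactScope": "cross_service",
//         "breakingPotential": "high", "affectedServices": ["auth-service"] }],
//       "context": { "task": "bug-fixing", "affected_services": ["auth-service"],
//         "environment_type": "local" }
//     }
//   }
// }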
async handleDetermineTestScope(args) {
const input = DetermineTestScopeInputSchema.parse(args);
this.logger.info("Determining test scope", {
changeCount: input.changes.length,
task: input.context.task,
environment: input.context.environment_type
});
const testImpact = await this.testImpactAnalyzer.analyzeTestImpact(
input.changes
);
const constraints = {
maxExecutionTime: input.constraints?.max_execution_time || config.maxExecutionTime,
maxParallelTests: config.maxParallelTests,
maxMemoryUsage: config.maxMemoryUsage,
cpuCores: config.cpuCores,
confiden