/*
 * claude-flow-multilang
 * Revolutionary multilingual AI orchestration framework with cultural
 * awareness and DDD architecture.
 * (Package listing metadata: 509 lines (420 loc), 17.9 kB, JavaScript.)
 */
import { promises as fs } from 'fs';
import os from 'os';
import path, { dirname } from 'path';
import { fileURLToPath } from 'url';
import {
  printSuccess,
  printError,
  printWarning,
  callRuvSwarmMCP,
  checkRuvSwarmAvailable,
} from '../utils.js';
import {
  initializeMetrics,
  getPerformanceReport,
  getBottleneckAnalysis,
  exportMetrics
} from './performance-metrics.js';
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
/**
 * Entry point for the `analysis` CLI command family.
 * Dispatches to the matching sub-command handler, or prints usage help
 * when `--help`/`-h` is passed or the sub-command is missing/unknown.
 * @param {string[]} subArgs - positional args; subArgs[0] is the sub-command
 * @param {object} flags - parsed CLI flags
 */
export async function analysisAction(subArgs, flags) {
  const [subcommand] = subArgs;
  const options = flags;
  const wantsHelp = options.help || options.h || !subcommand;
  if (wantsHelp) {
    showAnalysisHelp();
    return;
  }
  try {
    if (subcommand === 'bottleneck-detect') {
      await bottleneckDetectCommand(subArgs, flags);
    } else if (subcommand === 'performance-report') {
      await performanceReportCommand(subArgs, flags);
    } else if (subcommand === 'token-usage') {
      await tokenUsageCommand(subArgs, flags);
    } else {
      printError(`Unknown analysis command: ${subcommand}`);
      showAnalysisHelp();
    }
  } catch (err) {
    // Handlers may throw (metrics I/O, parsing); report rather than crash the CLI.
    printError(`Analysis command failed: ${err.message}`);
  }
}
/**
 * `analysis bottleneck-detect` — run a bottleneck analysis over the metrics
 * system and print severity-ranked findings, recommendations, and summary
 * metrics; a JSON report is saved under ./analysis-reports.
 * On any failure, degrades to a clearly-labelled simulated result.
 * @param {string[]} subArgs - positional CLI args (unused here)
 * @param {object} flags - parsed CLI flags (`scope`, `target`)
 */
async function bottleneckDetectCommand(subArgs, flags) {
  const options = flags;
  // Flag defaults: --scope (system|swarm|agent|task|memory), --target (id or 'all').
  const scope = options.scope || 'system';
  const target = options.target || 'all';
  console.log(`๐ Detecting performance bottlenecks...`);
  console.log(`๐ Scope: ${scope}`);
  console.log(`๐ฏ Target: ${target}`);
  try {
    // Initialize metrics system without starting monitoring
    await initializeMetrics(false);
    // Get real bottleneck analysis
    const analysis = await getBottleneckAnalysis(scope, target);
    printSuccess(`โ
Bottleneck analysis completed`);
    console.log(`\n๐ BOTTLENECK ANALYSIS RESULTS:`);
    analysis.bottlenecks.forEach((bottleneck) => {
      // Severity icon: first glyph for 'critical', second for 'warning',
      // third for everything else.
      const icon =
        bottleneck.severity === 'critical'
          ? '๐ด'
          : bottleneck.severity === 'warning'
            ? '๐ก'
            : '๐ข';
      console.log(
        ` ${icon} ${bottleneck.severity.toUpperCase()}: ${bottleneck.component} (${bottleneck.metric})`,
      );
      // Show details if available
      if (bottleneck.details) {
        bottleneck.details.forEach(detail => {
          // NOTE(review): assumes detail.duration is in seconds — confirm
          // against getBottleneckAnalysis's output format.
          console.log(` - ${detail.type || detail.id}: ${detail.duration}s`);
        });
      }
    });
    if (analysis.recommendations.length > 0) {
      console.log(`\n๐ก RECOMMENDATIONS:`);
      analysis.recommendations.forEach((rec) => {
        console.log(` โข ${rec}`);
      });
    }
    console.log(`\n๐ PERFORMANCE METRICS:`);
    console.log(` โข Analysis duration: ${analysis.analysisDuration.toFixed(2)}ms`);
    console.log(` โข Confidence score: ${(analysis.confidenceScore * 100).toFixed(0)}%`);
    console.log(` โข Issues detected: ${analysis.issuesDetected}`);
    // Save detailed report (directory is created on demand)
    const reportPath = path.join(process.cwd(), 'analysis-reports', `bottleneck-${Date.now()}.json`);
    await fs.mkdir(path.dirname(reportPath), { recursive: true });
    await fs.writeFile(reportPath, JSON.stringify(analysis, null, 2));
    console.log(`\n๐ Detailed report saved to: ${reportPath}`);
  } catch (err) {
    // Any failure above (metrics init, analysis, report write) lands here;
    // print canned figures instead of aborting the CLI.
    printError(`Bottleneck analysis failed: ${err.message}`);
    console.log('\nFalling back to simulated analysis...');
    // Fallback to simulated data
    console.log(`\n๐ BOTTLENECK ANALYSIS RESULTS (SIMULATED):`);
    console.log(` ๐ด CRITICAL: Memory usage in agent spawn process (85% utilization)`);
    console.log(` ๐ก WARNING: Task queue processing (12s avg)`);
    console.log(` ๐ข GOOD: Neural training pipeline (optimal)`);
    console.log(` ๐ข GOOD: Swarm coordination latency (within limits)`);
  }
}
/**
 * `analysis performance-report` — print a performance summary (task counts,
 * success rate, durations, memory, neural events), trend deltas versus the
 * previous period, optional per-agent detail, and export a full report.
 * Falls back to simulated figures on any failure.
 * @param {string[]} subArgs - positional CLI args (unused here)
 * @param {object} flags - parsed CLI flags (`timeframe`, `format`)
 */
async function performanceReportCommand(subArgs, flags) {
  const options = flags;
  // Flag defaults: --timeframe (1h|6h|24h|7d|30d), --format (summary|detailed|json|csv).
  const timeframe = options.timeframe || '24h';
  const format = options.format || 'summary';
  console.log(`๐ Generating performance report...`);
  console.log(`โฐ Timeframe: ${timeframe}`);
  console.log(`๐ Format: ${format}`);
  try {
    // Initialize metrics system without starting monitoring
    await initializeMetrics(false);
    // Get real performance data
    const report = await getPerformanceReport(timeframe);
    printSuccess(`โ
Performance report generated`);
    console.log(`\n๐ PERFORMANCE SUMMARY (${timeframe}):`);
    console.log(` ๐ Total tasks executed: ${report.summary.totalTasks}`);
    console.log(` โ
Success rate: ${report.summary.successRate.toFixed(1)}%`);
    console.log(` โฑ๏ธ Average execution time: ${report.summary.avgDuration.toFixed(1)}s`);
    console.log(` ๐ค Agents spawned: ${report.summary.agentsSpawned}`);
    console.log(` ๐พ Memory efficiency: ${report.summary.memoryEfficiency.toFixed(0)}%`);
    console.log(` ๐ง Neural learning events: ${report.summary.neuralEvents}`);
    // Show trends if available
    if (report.trends) {
      console.log(`\n๐ TRENDS:`);
      if (report.trends.successRateChange !== 0) {
        const trend = report.trends.successRateChange > 0 ? 'improved' : 'decreased';
        console.log(` โข Task success rate ${trend} ${Math.abs(report.trends.successRateChange).toFixed(1)}% vs previous period`);
      }
      if (report.trends.durationChange !== 0) {
        // durationChange appears to be milliseconds (divided by 1000 for display) — confirm.
        const trend = report.trends.durationChange < 0 ? 'reduced' : 'increased';
        console.log(` โข Average execution time ${trend} by ${Math.abs(report.trends.durationChange / 1000).toFixed(1)}s`);
      }
      if (report.trends.taskVolumeChange !== 0) {
        const trend = report.trends.taskVolumeChange > 0 ? 'increased' : 'decreased';
        // Volume delta expressed as a percentage of the current period's task count.
        const percent = Math.abs((report.trends.taskVolumeChange / report.summary.totalTasks) * 100).toFixed(0);
        console.log(` โข Task volume ${trend} ${percent}%`);
      }
    }
    if (format === 'detailed' && report.agentMetrics) {
      console.log(`\n๐ DETAILED METRICS:`);
      console.log(` Agent Performance:`);
      Object.entries(report.agentMetrics).forEach(([type, metrics]) => {
        console.log(` - ${type} agents: ${metrics.successRate.toFixed(0)}% success, ${(metrics.avgDuration / 1000).toFixed(1)}s avg`);
      });
    }
    // Export full report (JSON when requested, otherwise HTML)
    const reportPath = await exportMetrics(format === 'json' ? 'json' : 'html');
    console.log(`\n๐ Full report: ${reportPath}`);
  } catch (err) {
    // Degrade to canned, clearly-labelled simulated data on any failure.
    printError(`Failed to generate performance report: ${err.message}`);
    printWarning('Showing simulated data as fallback...');
    // Fallback to simulated data
    console.log(`\n๐ PERFORMANCE SUMMARY (${timeframe}) - SIMULATED:`);
    console.log(` ๐ Total tasks executed: 127`);
    console.log(` โ
Success rate: 94.5%`);
    console.log(` โฑ๏ธ Average execution time: 8.3s`);
    console.log(` ๐ค Agents spawned: 23`);
    console.log(` ๐พ Memory efficiency: 78%`);
    console.log(` ๐ง Neural learning events: 45`);
  }
}
/**
 * `analysis token-usage` — report token consumption, optionally with a
 * per-agent-type breakdown and a cost estimate, and write a CSV report.
 * Falls back to simulated figures when real metrics cannot be read.
 *
 * Fix: when no usage has been recorded (`tokenData.total === 0`, the common
 * fallback case) the percentage math previously printed "NaN%"; percentages
 * are now guarded.
 * @param {string[]} subArgs - positional CLI args (unused here)
 * @param {object} flags - parsed CLI flags (`agent`, `breakdown`, `cost-analysis`)
 */
async function tokenUsageCommand(subArgs, flags) {
  const options = flags;
  const agent = options.agent || 'all';
  const breakdown = options.breakdown || false;
  const costAnalysis = options['cost-analysis'] || false;
  console.log(`๐ข Analyzing token usage...`);
  console.log(`๐ค Agent filter: ${agent}`);
  console.log(`๐ Include breakdown: ${breakdown ? 'Yes' : 'No'}`);
  console.log(`๐ฐ Include cost analysis: ${costAnalysis ? 'Yes' : 'No'}`);
  try {
    // Get real token usage from Claude Code metrics
    const tokenData = await getRealTokenUsage(agent);
    printSuccess(`โ
Token usage analysis completed`);
    // Guard against division by zero when no usage was recorded.
    const pctOfTotal = (part) =>
      tokenData.total > 0 ? ((part / tokenData.total) * 100).toFixed(1) : '0.0';
    // Display real token usage
    console.log(`\n๐ข TOKEN USAGE SUMMARY:`);
    console.log(` ๐ Total tokens consumed: ${tokenData.total.toLocaleString()}`);
    console.log(` ๐ฅ Input tokens: ${tokenData.input.toLocaleString()} (${pctOfTotal(tokenData.input)}%)`);
    console.log(` ๐ค Output tokens: ${tokenData.output.toLocaleString()} (${pctOfTotal(tokenData.output)}%)`);
    if (costAnalysis) {
      const cost = calculateCost(tokenData);
      console.log(` ๐ฐ Estimated cost: $${cost.total.toFixed(2)}`);
      console.log(` Input cost: $${cost.input.toFixed(2)}`);
      console.log(` Output cost: $${cost.output.toFixed(2)}`);
    }
    if (breakdown && tokenData.byAgent) {
      console.log(`\n๐ BREAKDOWN BY AGENT TYPE:`);
      Object.entries(tokenData.byAgent).forEach(([agentType, usage]) => {
        const percentage = pctOfTotal(usage);
        const icon = getAgentIcon(agentType);
        console.log(` ${icon} ${agentType}: ${usage.toLocaleString()} tokens (${percentage}%)`);
      });
      console.log(`\n๐ก OPTIMIZATION OPPORTUNITIES:`);
      const opportunities = generateOptimizationSuggestions(tokenData);
      opportunities.forEach(suggestion => {
        console.log(` โข ${suggestion}`);
      });
    }
    // Generate real CSV report
    const reportPath = await generateTokenUsageReport(tokenData, agent);
    console.log(`\n๐ Detailed usage log: ${reportPath}`);
  } catch (err) {
    // Metrics unreadable or report write failed: show canned data instead.
    printError(`Failed to get real token usage: ${err.message}`);
    printWarning('Falling back to simulated data...');
    // Fallback to simulated data
    await showSimulatedTokenUsage(breakdown, costAnalysis);
  }
}
/**
 * Print usage information for the `analysis` command family.
 * Invoked for `--help`/`-h`, a missing sub-command, or an unknown sub-command.
 */
function showAnalysisHelp() {
  // Single template literal so the help text is emitted verbatim.
  console.log(`
๐ Analysis Commands - Performance & Usage Analytics
USAGE:
claude-flow analysis <command> [options]
COMMANDS:
bottleneck-detect Detect performance bottlenecks in the system
performance-report Generate comprehensive performance reports
token-usage Analyze token consumption and costs
BOTTLENECK DETECT OPTIONS:
--scope <scope> Analysis scope (default: system)
Options: system, swarm, agent, task, memory
--target <target> Specific target to analyze (default: all)
Examples: agent-id, swarm-id, task-type
PERFORMANCE REPORT OPTIONS:
--timeframe <time> Report timeframe (default: 24h)
Options: 1h, 6h, 24h, 7d, 30d
--format <format> Report format (default: summary)
Options: summary, detailed, json, csv
TOKEN USAGE OPTIONS:
--agent <agent> Filter by agent type or ID (default: all)
--breakdown Include detailed breakdown by agent type
--cost-analysis Include cost projections and optimization
EXAMPLES:
# Detect system-wide bottlenecks
claude-flow analysis bottleneck-detect --scope system
# Agent-specific bottleneck analysis
claude-flow analysis bottleneck-detect --scope agent --target coordinator-1
# Weekly performance report
claude-flow analysis performance-report --timeframe 7d --format detailed
# Token usage with breakdown
claude-flow analysis token-usage --breakdown --cost-analysis
# Swarm-specific analysis
claude-flow analysis bottleneck-detect --scope swarm --target swarm-123
๐ฏ Analysis helps with:
โข Performance optimization
โข Cost management
โข Resource allocation
โข Bottleneck identification
โข Trend analysis
`);
}
// Helper functions for real token tracking
/**
 * Resolve token usage from the best available source: OpenTelemetry metrics
 * when Claude Code telemetry is enabled, otherwise local metrics files.
 * @param {string} agent - agent type/id filter, or 'all'
 * @returns {Promise<{total:number,input:number,output:number,byAgent:object}>}
 */
async function getRealTokenUsage(agent) {
  const telemetryEnabled = process.env.CLAUDE_CODE_ENABLE_TELEMETRY === '1';
  return telemetryEnabled
    ? getOTelTokenMetrics(agent)
    : getLocalTokenMetrics(agent);
}
/**
 * Read token usage from Claude Code's local metrics file
 * (~/.claude/metrics/usage.json) and aggregate it per session.
 *
 * Fix: the home directory is now resolved with os.homedir(), which works on
 * every platform; process.env.HOME is unset on Windows, which previously
 * made the path relative and the lookup always fail there.
 * @param {string} agent - agent type filter, or 'all'
 * @returns {Promise<{total:number,input:number,output:number,byAgent:object}>}
 */
async function getLocalTokenMetrics(agent) {
  const metricsPath = path.join(os.homedir(), '.claude', 'metrics', 'usage.json');
  try {
    const data = await fs.readFile(metricsPath, 'utf8');
    const metrics = JSON.parse(data);
    const tokenData = {
      total: 0,
      input: 0,
      output: 0,
      byAgent: {}
    };
    // Process metrics based on Claude Code format: one entry per session,
    // each carrying its own tokenUsage counters.
    if (metrics.sessions) {
      for (const session of metrics.sessions) {
        const usage = session.tokenUsage;
        if (!usage) continue;
        tokenData.total += usage.total || 0;
        tokenData.input += usage.input || 0;
        tokenData.output += usage.output || 0;
        // Track by agent if available; the per-agent map honors the filter
        // while the grand totals intentionally cover every session.
        if (session.agentType && (agent === 'all' || session.agentType === agent)) {
          tokenData.byAgent[session.agentType] =
            (tokenData.byAgent[session.agentType] || 0) + (usage.total || 0);
        }
      }
    }
    return tokenData;
  } catch (err) {
    // File missing or unparsable: fall back to project-local tracking.
    return getEnvironmentTokenMetrics(agent);
  }
}
/**
 * Read project-local token tracking data from .claude-flow/token-usage.json
 * in the current working directory.
 * @param {string} agent - agent filter (currently unused by this reader)
 * @returns {Promise<object>} parsed usage data, or a zeroed record when the
 *   file is absent or unparsable
 */
async function getEnvironmentTokenMetrics(agent) {
  const trackingFile = path.join(process.cwd(), '.claude-flow', 'token-usage.json');
  try {
    const raw = await fs.readFile(trackingFile, 'utf8');
    return JSON.parse(raw);
  } catch (err) {
    // Missing or malformed file: report zero usage rather than failing.
    return {
      total: 0,
      input: 0,
      output: 0,
      byAgent: {}
    };
  }
}
/**
 * Fetch token metrics from OpenTelemetry.
 * Placeholder: a real implementation would query the configured OTel
 * exporter; until then a zeroed usage record is returned.
 * @param {string} agent - agent filter (unused by the placeholder)
 * @returns {Promise<{total:number,input:number,output:number,byAgent:object}>}
 */
async function getOTelTokenMetrics(agent) {
  const emptyUsage = {
    total: 0,
    input: 0,
    output: 0,
    byAgent: {},
  };
  return emptyUsage;
}
/**
 * Estimate USD cost for a token-usage record.
 * Anthropic Claude pricing (as of knowledge cutoff):
 *   Opus $15/$75, Sonnet $3/$15, Haiku $0.25/$1.25 per million tokens
 *   (input/output). Claude Code defaults to Sonnet, so Sonnet rates apply.
 * @param {{input:number,output:number}} tokenData
 * @returns {{input:number,output:number,total:number}} costs in USD
 */
function calculateCost(tokenData) {
  const INPUT_RATE_PER_MILLION = 3.00;
  const OUTPUT_RATE_PER_MILLION = 15.00;
  const inputCost = (tokenData.input / 1000000) * INPUT_RATE_PER_MILLION;
  const outputCost = (tokenData.output / 1000000) * OUTPUT_RATE_PER_MILLION;
  return {
    input: inputCost,
    output: outputCost,
    total: inputCost + outputCost,
  };
}
/**
 * Map an agent type (case-insensitive) to its display icon.
 * Unknown types get a generic agent icon.
 * @param {string} agentType
 * @returns {string} icon glyph
 */
function getAgentIcon(agentType) {
  switch (agentType.toLowerCase()) {
    case 'coordinator':
      return '๐ฏ';
    case 'developer':
      return '๐จโ๐ป';
    case 'researcher':
      return '๐';
    case 'analyzer':
      return '๐';
    case 'tester':
      return '๐งช';
    case 'architect':
      return '๐๏ธ';
    case 'reviewer':
      return '๐๏ธ';
    default:
      return '๐ค';
  }
}
/**
 * Derive optimization hints from a token-usage record.
 * Per-agent hints fire when an agent type consumes a large share of the
 * total; global hints fire on high absolute usage or a high output ratio.
 * @param {{total:number,input:number,output:number,byAgent?:object}} tokenData
 * @returns {string[]} at least one suggestion (a default when none apply)
 */
function generateOptimizationSuggestions(tokenData) {
  const suggestions = [];
  // Per-agent-type share of total usage; thresholds cascade, so a >40%
  // consumer receives both the prompt-optimization and caching hints.
  if (tokenData.byAgent) {
    for (const [agentType, usage] of Object.entries(tokenData.byAgent)) {
      const share = (usage / tokenData.total) * 100;
      if (share > 40) {
        suggestions.push(`${agentType} agents: Consider prompt optimization (-15% potential)`);
      }
      if (share > 25) {
        suggestions.push(`${agentType} agents: Implement response caching (-8% potential)`);
      }
    }
  }
  // Global heuristics based on overall volume and the input/output balance.
  if (tokenData.total > 100000) {
    suggestions.push('Consider using Claude Haiku for non-critical tasks (-90% cost)');
  }
  if (tokenData.output > tokenData.input * 2) {
    suggestions.push('High output ratio: Consider more concise prompts');
  }
  return suggestions.length > 0 ? suggestions : ['Token usage is within optimal range'];
}
/**
 * Write a CSV token-usage report to ./analysis-reports and return its path.
 *
 * Fixes:
 *  - Rows were joined with the two-character sequence "\\n" (a literal
 *    backslash-n), which collapsed the whole CSV onto one line; rows now
 *    end with real newlines.
 *  - `if (tokenData.byAgent)` was truthy for an empty object, producing a
 *    header-only report; an empty per-agent map now falls back to the
 *    single summary row.
 * @param {{input:number,output:number,total:number,byAgent?:object}} tokenData
 * @param {string} agent - agent label used for the summary row
 * @returns {Promise<string>} absolute path of the written CSV file
 */
async function generateTokenUsageReport(tokenData, agent) {
  const reportsDir = path.join(process.cwd(), 'analysis-reports');
  // recursive:true makes mkdir a no-op when the directory already exists.
  await fs.mkdir(reportsDir, { recursive: true });
  const timestamp = Date.now();
  const reportPath = path.join(reportsDir, `token-usage-${timestamp}.csv`);
  const rows = ['Timestamp,Agent,Input Tokens,Output Tokens,Total Tokens,Cost'];
  const now = new Date().toISOString();
  const hasAgentBreakdown =
    tokenData.byAgent && Object.keys(tokenData.byAgent).length > 0;
  if (hasAgentBreakdown) {
    for (const [agentType, usage] of Object.entries(tokenData.byAgent)) {
      // Per-agent input/output split is not tracked; estimate 60% input.
      const agentInput = Math.floor(usage * 0.6);
      const agentOutput = usage - agentInput;
      const agentCost = calculateCost({ input: agentInput, output: agentOutput, total: usage });
      rows.push(`${now},${agentType},${agentInput},${agentOutput},${usage},$${agentCost.total.toFixed(4)}`);
    }
  } else {
    const cost = calculateCost(tokenData);
    rows.push(`${now},${agent},${tokenData.input},${tokenData.output},${tokenData.total},$${cost.total.toFixed(4)}`);
  }
  await fs.writeFile(reportPath, rows.join('\n') + '\n');
  return reportPath;
}
/**
 * Print canned token-usage figures when real metrics are unavailable.
 *
 * Fix: several headers used the two-character sequence "\\n", printing a
 * literal backslash-n to the console instead of a blank line; these are now
 * real newlines, matching the summary header.
 * @param {boolean} breakdown - include the per-agent-type breakdown section
 * @param {boolean} costAnalysis - include the estimated-cost line
 */
async function showSimulatedTokenUsage(breakdown, costAnalysis) {
  // Existing simulated data as fallback
  console.log(`\n๐ข TOKEN USAGE SUMMARY (Simulated):`);
  console.log(` ๐ Total tokens consumed: 45,231`);
  console.log(` ๐ฅ Input tokens: 28,567 (63.2%)`);
  console.log(` ๐ค Output tokens: 16,664 (36.8%)`);
  if (costAnalysis) {
    console.log(` ๐ฐ Estimated cost: $0.23`);
  }
  if (breakdown) {
    console.log(`\n๐ BREAKDOWN BY AGENT TYPE:`);
    console.log(` ๐ฏ Coordinator: 12,430 tokens (27.5%)`);
    console.log(` ๐จโ๐ป Developer: 18,965 tokens (41.9%)`);
    console.log(` ๐ Researcher: 8,734 tokens (19.3%)`);
    console.log(` ๐ Analyzer: 5,102 tokens (11.3%)`);
    console.log(`\n๐ก OPTIMIZATION OPPORTUNITIES:`);
    console.log(` โข Developer agents: Consider prompt optimization (-15% potential)`);
    console.log(` โข Coordinator agents: Implement response caching (-8% potential)`);
  }
  console.log(`\n๐ Note: Enable CLAUDE_CODE_ENABLE_TELEMETRY=1 for real metrics`);
}