il2cpp-dump-analyzer-mcp
Agentic RAG system for analyzing IL2CPP dump.cs files from Unity games
"use strict";
/**
* MCP Performance Optimizer - Intelligent caching and performance optimization for MCP workflows
*
* This module provides comprehensive performance optimization for the IL2CPP Dump Analyzer
* Agentic RAG MCP System, including intelligent caching, bottleneck detection, adaptive
* learning, and resource management.
*/
Object.defineProperty(exports, "__esModule", { value: true });
exports.MCPPerformanceOptimizer = void 0;
/**
* MCP Performance Optimizer - Comprehensive performance optimization for MCP workflows
*/
class MCPPerformanceOptimizer {
constructor(config = {}) {
this.caches = new Map();
this.cacheStats = new Map();
this.performanceMetrics = [];
this.learningPatterns = new Map();
this.activeRequests = new Map();
this.cacheConfigs = new Map();
this.config = {
enableCaching: true,
enableMonitoring: true,
enableLearning: true,
enableDeduplication: true,
monitoringIntervalMs: 30000, // 30 seconds
memoryThreshold: 80, // flag a memory bottleneck above 80% heap usage
...config
};
this.initializeCacheConfigurations();
if (this.config.enableMonitoring) {
this.startPerformanceMonitoring();
}
}
/**
* Initialize cache configurations for different MCP operation types
*/
initializeCacheConfigurations() {
// Search results cache - frequently accessed, medium TTL
this.cacheConfigs.set('search', {
maxSizeBytes: 50 * 1024 * 1024, // 50MB
ttlMs: 10 * 60 * 1000, // 10 minutes
maxEntries: 1000,
hitRatioThreshold: 0.7,
adaptiveTtl: true
});
// Analysis results cache - computationally expensive, longer TTL
this.cacheConfigs.set('analysis', {
maxSizeBytes: 100 * 1024 * 1024, // 100MB
ttlMs: 30 * 60 * 1000, // 30 minutes
maxEntries: 500,
hitRatioThreshold: 0.8,
adaptiveTtl: true
});
// Code generation cache - stable results, long TTL
this.cacheConfigs.set('generation', {
maxSizeBytes: 25 * 1024 * 1024, // 25MB
ttlMs: 60 * 60 * 1000, // 1 hour
maxEntries: 200,
hitRatioThreshold: 0.9,
adaptiveTtl: false
});
// Embeddings cache - expensive to compute, very long TTL
this.cacheConfigs.set('embeddings', {
maxSizeBytes: 200 * 1024 * 1024, // 200MB
ttlMs: 24 * 60 * 60 * 1000, // 24 hours
maxEntries: 10000,
hitRatioThreshold: 0.95,
adaptiveTtl: false
});
// Initialize cache instances
for (const cacheType of this.cacheConfigs.keys()) {
this.caches.set(cacheType, new Map());
this.cacheStats.set(cacheType, {
hits: 0,
misses: 0,
evictions: 0,
totalRequests: 0,
hitRatio: 0,
averageResponseTime: 0,
memoryUsage: 0
});
}
}
/**
* Start performance monitoring
*/
startPerformanceMonitoring() {
this.monitoringInterval = setInterval(() => {
this.collectPerformanceMetrics();
this.detectBottlenecks();
this.optimizeCaches();
}, this.config.monitoringIntervalMs);
}
/**
* Stop performance monitoring
*/
stopMonitoring() {
if (this.monitoringInterval) {
clearInterval(this.monitoringInterval);
this.monitoringInterval = undefined;
}
}
/**
* Get cached result or execute operation with caching
*/
async getCachedOrExecute(cacheType, key, operation, context) {
const startTime = Date.now();
try {
// Check for request deduplication
if (this.config.enableDeduplication) {
const deduplicationKey = `${cacheType}:${key}`;
if (this.activeRequests.has(deduplicationKey)) {
return await this.activeRequests.get(deduplicationKey);
}
}
// Check cache first
if (this.config.enableCaching) {
const cached = this.getCachedResult(cacheType, key);
if (cached !== null) {
this.updateCacheStats(cacheType, true, Date.now() - startTime);
return cached;
}
}
// Execute operation with deduplication
const deduplicationKey = `${cacheType}:${key}`;
const operationPromise = operation();
if (this.config.enableDeduplication) {
this.activeRequests.set(deduplicationKey, operationPromise);
}
try {
const result = await operationPromise;
// Cache the result
if (this.config.enableCaching) {
this.setCachedResult(cacheType, key, result, context);
}
this.updateCacheStats(cacheType, false, Date.now() - startTime);
// Learn from execution pattern
if (this.config.enableLearning) {
this.recordLearningPattern(cacheType, key, Date.now() - startTime, context);
}
return result;
}
finally {
if (this.config.enableDeduplication) {
this.activeRequests.delete(deduplicationKey);
}
}
}
catch (error) {
this.updateCacheStats(cacheType, false, Date.now() - startTime);
throw error;
}
}
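/*
 * Usage sketch (illustrative only): `optimizer` and `embedText` are hypothetical
 * names, not part of this module. With deduplication enabled, concurrent calls
 * that share the same cache type and key await a single in-flight promise, so
 * the underlying operation runs once:
 *
 *   const optimizer = new MCPPerformanceOptimizer({ enableDeduplication: true });
 *   const [a, b] = await Promise.all([
 *     optimizer.getCachedOrExecute('embeddings', 'PlayerController', () => embedText('PlayerController')),
 *     optimizer.getCachedOrExecute('embeddings', 'PlayerController', () => embedText('PlayerController'))
 *   ]);
 *   // embedText() executes once; both callers receive the same result, and later
 *   // calls within the 24-hour embeddings TTL are served from cache.
 */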
/**
* Get cached result
*/
getCachedResult(cacheType, key) {
const cache = this.caches.get(cacheType);
const config = this.cacheConfigs.get(cacheType);
if (!cache || !config) {
return null;
}
const entry = cache.get(key);
if (!entry) {
return null;
}
// Check TTL
const now = Date.now();
const ttl = config.adaptiveTtl ? this.getAdaptiveTtl(cacheType, key) : config.ttlMs;
if (now - entry.timestamp > ttl) {
cache.delete(key);
return null;
}
// Update access time for LRU
entry.lastAccessed = now;
entry.accessCount++;
// Also record the hit on the learning pattern so adaptive TTL can reward
// frequently accessed entries (otherwise pattern.accessCount never increases)
const pattern = this.learningPatterns.get(`${cacheType}:${key}`);
if (pattern) {
pattern.accessCount++;
pattern.lastAccessed = now;
}
return entry.data;
}
/**
* Set cached result
*/
setCachedResult(cacheType, key, data, context) {
const cache = this.caches.get(cacheType);
const config = this.cacheConfigs.get(cacheType);
if (!cache || !config) {
return;
}
const now = Date.now();
// Estimate the payload size once and reuse it for both the entry and its metadata
const size = this.estimateSize(data);
const entry = {
key,
data,
timestamp: now,
lastAccessed: now,
accessCount: 1,
size,
metadata: {
createdAt: now,
lastAccessedAt: now,
accessCount: 1,
size,
ttl: config.ttlMs
},
semanticTags: [],
relatedKeys: [],
context
};
// Check cache size limits
if (cache.size >= config.maxEntries) {
this.evictLeastRecentlyUsed(cacheType);
}
cache.set(key, entry);
}
/**
* Evict least recently used cache entries
*/
evictLeastRecentlyUsed(cacheType) {
const cache = this.caches.get(cacheType);
const stats = this.cacheStats.get(cacheType);
if (!cache || !stats) {
return;
}
let oldestEntry = null;
for (const [key, entry] of cache.entries()) {
if (!oldestEntry || entry.lastAccessed < oldestEntry.lastAccessed) {
oldestEntry = { key, lastAccessed: entry.lastAccessed };
}
}
if (oldestEntry) {
cache.delete(oldestEntry.key);
stats.evictions++;
}
}
/**
* Get adaptive TTL based on usage patterns
*/
getAdaptiveTtl(cacheType, key) {
const config = this.cacheConfigs.get(cacheType);
const pattern = this.learningPatterns.get(`${cacheType}:${key}`);
if (!config || !pattern) {
return config?.ttlMs || 300000; // 5 minutes default
}
// Increase TTL for frequently accessed items
const accessFrequency = pattern.accessCount / Math.max(1, pattern.totalExecutions);
const frequencyMultiplier = Math.min(2.0, 1.0 + accessFrequency);
// Decrease TTL for items with high variance in execution time
const varianceMultiplier = Math.max(0.5, 1.0 - (pattern.executionTimeVariance / 10000));
return Math.round(config.ttlMs * frequencyMultiplier * varianceMultiplier);
}
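/*
 * Worked example (illustrative numbers): for the 'search' cache (base TTL 600000 ms),
 * an access frequency of 0.5 gives frequencyMultiplier = min(2.0, 1.0 + 0.5) = 1.5,
 * and an execution-time variance of 2500 gives varianceMultiplier = max(0.5, 1.0 - 0.25) = 0.75,
 * so the adaptive TTL becomes Math.round(600000 * 1.5 * 0.75) = 675000 ms (11.25 minutes).
 */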
/**
* Update cache statistics
*/
updateCacheStats(cacheType, hit, responseTime) {
const stats = this.cacheStats.get(cacheType);
if (!stats) {
return;
}
stats.totalRequests++;
if (hit) {
stats.hits++;
}
else {
stats.misses++;
}
stats.hitRatio = stats.hits / stats.totalRequests;
stats.averageResponseTime = (stats.averageResponseTime * (stats.totalRequests - 1) + responseTime) / stats.totalRequests;
}
/**
* Record learning pattern for adaptive optimization
*/
recordLearningPattern(cacheType, key, executionTime, context) {
const patternKey = `${cacheType}:${key}`;
let pattern = this.learningPatterns.get(patternKey);
if (!pattern) {
pattern = {
key: patternKey,
cacheType,
totalExecutions: 0,
averageExecutionTime: 0,
executionTimeVariance: 0,
accessCount: 0,
lastAccessed: Date.now(),
contexts: []
};
this.learningPatterns.set(patternKey, pattern);
}
// Update execution statistics
pattern.totalExecutions++;
const oldAverage = pattern.averageExecutionTime;
pattern.averageExecutionTime = (oldAverage * (pattern.totalExecutions - 1) + executionTime) / pattern.totalExecutions;
// Update variance (simplified calculation)
const variance = Math.pow(executionTime - pattern.averageExecutionTime, 2);
pattern.executionTimeVariance = (pattern.executionTimeVariance * (pattern.totalExecutions - 1) + variance) / pattern.totalExecutions;
pattern.lastAccessed = Date.now();
// Store context for pattern analysis
if (context && pattern.contexts.length < 10) {
pattern.contexts.push(context);
}
}
/**
* Collect performance metrics
*/
collectPerformanceMetrics() {
const now = Date.now();
const memoryUsage = process.memoryUsage();
const metrics = {
timestamp: now,
memoryUsage: {
heapUsed: memoryUsage.heapUsed,
heapTotal: memoryUsage.heapTotal,
external: memoryUsage.external,
rss: memoryUsage.rss
},
cacheMetrics: new Map(),
activeRequests: this.activeRequests.size,
learningPatterns: this.learningPatterns.size
};
// Collect cache metrics
for (const [cacheType, stats] of this.cacheStats.entries()) {
const cache = this.caches.get(cacheType);
const config = this.cacheConfigs.get(cacheType);
if (cache && config) {
let totalSize = 0;
for (const entry of cache.values()) {
totalSize += entry.size || 0;
}
stats.memoryUsage = totalSize;
metrics.cacheMetrics.set(cacheType, { ...stats });
}
}
this.performanceMetrics.push(metrics);
// Keep only last 100 metrics entries
if (this.performanceMetrics.length > 100) {
this.performanceMetrics = this.performanceMetrics.slice(-100);
}
}
/**
* Detect performance bottlenecks
*/
detectBottlenecks() {
const bottlenecks = [];
const latestMetrics = this.performanceMetrics[this.performanceMetrics.length - 1];
if (!latestMetrics) {
return bottlenecks;
}
// Memory bottleneck detection
const memoryUsagePercent = (latestMetrics.memoryUsage.heapUsed / latestMetrics.memoryUsage.heapTotal) * 100;
if (memoryUsagePercent > this.config.memoryThreshold) {
bottlenecks.push({
detected: true,
type: 'memory',
severity: memoryUsagePercent > 95 ? 'critical' : memoryUsagePercent > 90 ? 'high' : 'medium',
description: `High memory usage detected: ${memoryUsagePercent.toFixed(1)}%`,
recommendations: [
'Consider reducing cache sizes',
'Implement more aggressive cache eviction',
'Review memory-intensive operations'
],
metrics: {
memoryUsagePercent,
heapUsed: latestMetrics.memoryUsage.heapUsed,
heapTotal: latestMetrics.memoryUsage.heapTotal
}
});
}
// Cache performance bottleneck detection
for (const [cacheType, stats] of latestMetrics.cacheMetrics.entries()) {
const config = this.cacheConfigs.get(cacheType);
if (config && stats.hitRatio < config.hitRatioThreshold) {
bottlenecks.push({
detected: true,
type: 'cache',
severity: stats.hitRatio < 0.3 ? 'high' : 'medium',
description: `Low cache hit ratio for ${cacheType}: ${(stats.hitRatio * 100).toFixed(1)}%`,
recommendations: [
'Increase cache TTL',
'Review cache key generation strategy',
'Consider pre-warming cache with common queries'
],
metrics: {
hitRatio: stats.hitRatio,
totalRequests: stats.totalRequests,
averageResponseTime: stats.averageResponseTime
}
});
}
}
return bottlenecks;
}
/**
* Optimize caches based on performance metrics and learning patterns
*/
optimizeCaches() {
for (const [cacheType, cache] of this.caches.entries()) {
const config = this.cacheConfigs.get(cacheType);
const stats = this.cacheStats.get(cacheType);
if (!config || !stats) {
continue;
}
// Clean up expired entries
this.cleanupExpiredEntries(cacheType);
// Optimize cache size based on hit ratio
if (stats.hitRatio < config.hitRatioThreshold && cache.size > config.maxEntries * 0.5) {
// Reduce cache size if hit ratio is low
const targetSize = Math.floor(cache.size * 0.8);
this.reduceCacheSize(cacheType, targetSize);
}
}
}
/**
* Clean up expired cache entries
*/
cleanupExpiredEntries(cacheType) {
const cache = this.caches.get(cacheType);
const config = this.cacheConfigs.get(cacheType);
if (!cache || !config) {
return;
}
const now = Date.now();
const expiredKeys = [];
for (const [key, entry] of cache.entries()) {
const ttl = config.adaptiveTtl ? this.getAdaptiveTtl(cacheType, key) : config.ttlMs;
if (now - entry.timestamp > ttl) {
expiredKeys.push(key);
}
}
for (const key of expiredKeys) {
cache.delete(key);
}
}
/**
* Reduce cache size to target size
*/
reduceCacheSize(cacheType, targetSize) {
const cache = this.caches.get(cacheType);
if (!cache || cache.size <= targetSize) {
return;
}
// Sort entries by access frequency and recency
const entries = Array.from(cache.entries()).map(([key, entry]) => ({
key,
entry,
score: entry.accessCount * (1 / (Date.now() - entry.lastAccessed + 1))
}));
entries.sort((a, b) => a.score - b.score);
// Remove least valuable entries
const toRemove = cache.size - targetSize;
for (let i = 0; i < toRemove && i < entries.length; i++) {
cache.delete(entries[i].key);
}
}
/**
* Estimate size of cached data
*/
estimateSize(data) {
try {
return JSON.stringify(data).length * 2; // Rough estimate (UTF-16)
}
catch {
return 1024; // Default size if estimation fails
}
}
/**
* Get performance metrics
*/
getPerformanceMetrics() {
return [...this.performanceMetrics];
}
/**
* Get cache statistics
*/
getCacheStatistics() {
return new Map(this.cacheStats);
}
/**
* Get learning patterns
*/
getLearningPatterns() {
return new Map(this.learningPatterns);
}
/**
* Clear cache for specific type or all caches
*/
clearCache(cacheType) {
if (cacheType) {
const cache = this.caches.get(cacheType);
if (cache) {
cache.clear();
// Reset stats
const stats = this.cacheStats.get(cacheType);
if (stats) {
Object.assign(stats, {
hits: 0,
misses: 0,
evictions: 0,
totalRequests: 0,
hitRatio: 0,
averageResponseTime: 0,
memoryUsage: 0
});
}
}
}
else {
// Clear all caches
for (const cache of this.caches.values()) {
cache.clear();
}
// Reset all stats
for (const stats of this.cacheStats.values()) {
Object.assign(stats, {
hits: 0,
misses: 0,
evictions: 0,
totalRequests: 0,
hitRatio: 0,
averageResponseTime: 0,
memoryUsage: 0
});
}
}
}
/**
* Get optimization recommendations based on current performance
*/
getOptimizationRecommendations() {
const recommendations = [];
const bottlenecks = this.detectBottlenecks();
for (const bottleneck of bottlenecks) {
recommendations.push(...bottleneck.recommendations);
}
// Add general recommendations based on cache performance
for (const [cacheType, stats] of this.cacheStats.entries()) {
if (stats.hitRatio < 0.5 && stats.totalRequests > 100) {
recommendations.push(`Consider reviewing cache strategy for ${cacheType} (low hit ratio: ${(stats.hitRatio * 100).toFixed(1)}%)`);
}
if (stats.averageResponseTime > 1000) {
recommendations.push(`Optimize operations for ${cacheType} (high average response time: ${stats.averageResponseTime.toFixed(0)}ms)`);
}
}
return [...new Set(recommendations)]; // Remove duplicates
}
/**
* Pre-warm cache with common queries
*/
async preWarmCache(cacheType, queries) {
const promises = queries.map(async ({ key, operation }) => {
try {
const result = await operation();
this.setCachedResult(cacheType, key, result);
}
catch (error) {
// Pre-warming failures are non-fatal: log a warning and continue with the remaining queries
console.warn(`Failed to pre-warm cache for ${cacheType}:${key}:`, error);
}
});
await Promise.allSettled(promises);
}
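/*
 * Pre-warming sketch (illustrative only): `optimizer` and `searchDump` are hypothetical
 * names, not part of this module. Each query runs once and its result is cached under
 * the given key; individual failures are logged and skipped:
 *
 *   await optimizer.preWarmCache('search', [
 *     { key: 'class:GameManager', operation: () => searchDump('GameManager') },
 *     { key: 'class:PlayerController', operation: () => searchDump('PlayerController') }
 *   ]);
 */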
/**
* Export performance data for analysis
*/
exportPerformanceData() {
const cacheStatsObj = {};
for (const [key, value] of this.cacheStats.entries()) {
cacheStatsObj[key] = { ...value };
}
const learningPatternsObj = {};
for (const [key, value] of this.learningPatterns.entries()) {
learningPatternsObj[key] = { ...value };
}
return {
metrics: [...this.performanceMetrics],
cacheStats: cacheStatsObj,
learningPatterns: learningPatternsObj,
config: { ...this.config }
};
}
/**
* Cleanup resources and stop monitoring
*/
dispose() {
this.stopMonitoring();
this.clearCache();
this.performanceMetrics.length = 0;
this.learningPatterns.clear();
this.activeRequests.clear();
}
}
exports.MCPPerformanceOptimizer = MCPPerformanceOptimizer;
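/*
 * End-to-end sketch (illustrative only): `runDependencyAnalysis` is a hypothetical
 * operation, not part of this module. A typical consumer constructs one optimizer,
 * routes expensive calls through getCachedOrExecute, and disposes it on shutdown:
 *
 *   const { MCPPerformanceOptimizer } = require('./mcp-performance-optimizer');
 *   const optimizer = new MCPPerformanceOptimizer({ monitoringIntervalMs: 60000 });
 *   const report = await optimizer.getCachedOrExecute('analysis', 'deps:Assembly-CSharp',
 *     () => runDependencyAnalysis('Assembly-CSharp'));
 *   console.log(optimizer.getOptimizationRecommendations());
 *   optimizer.dispose(); // stops the monitoring interval and clears all caches
 */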
//# sourceMappingURL=mcp-performance-optimizer.js.map