il2cpp-dump-analyzer-mcp
Agentic RAG system for analyzing IL2CPP dump.cs files from Unity games
/**
* MCP Performance Optimizer - Intelligent caching and performance optimization for MCP workflows
*
* This module provides comprehensive performance optimization for the IL2CPP Dump Analyzer
* Agentic RAG MCP System, including intelligent caching, bottleneck detection, adaptive
* learning, and resource management.
*/
import { MCPExecutionContext, PerformanceMetrics, LearningPattern } from './types.js';
/**
* Performance optimization configuration
*/
interface OptimizationConfig {
/** Enable intelligent caching */
enableCaching: boolean;
/** Enable performance monitoring */
enableMonitoring: boolean;
/** Enable adaptive learning */
enableLearning: boolean;
/** Enable request deduplication */
enableDeduplication: boolean;
/** Performance monitoring interval in ms */
monitoringIntervalMs: number;
/** Memory usage threshold for cleanup (percentage) */
memoryThreshold: number;
}
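/**
 * Example: constructing an optimizer with a partial configuration. This is
 * an illustrative sketch; the module path and the values shown are
 * assumptions, not documented defaults.
 *
 * @example
 * import { MCPPerformanceOptimizer } from './performance-optimizer.js'; // path assumed
 *
 * const optimizer = new MCPPerformanceOptimizer({
 *   enableCaching: true,
 *   enableDeduplication: true,
 *   monitoringIntervalMs: 30_000, // hypothetical: collect metrics every 30s
 *   memoryThreshold: 80,          // hypothetical: trigger cleanup above 80% memory usage
 * });
 */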
/**
* Cache statistics for monitoring and optimization
*/
interface CacheStats {
hits: number;
misses: number;
evictions: number;
totalRequests: number;
hitRatio: number;
averageResponseTime: number;
memoryUsage: number;
}
/**
* MCP Performance Optimizer - Comprehensive performance optimization for MCP workflows
*/
export declare class MCPPerformanceOptimizer {
private caches;
private cacheStats;
private performanceMetrics;
private learningPatterns;
private activeRequests;
private config;
private cacheConfigs;
private monitoringInterval?;
constructor(config?: Partial<OptimizationConfig>);
/**
* Initialize cache configurations for different MCP operation types
*/
private initializeCacheConfigurations;
/**
* Start performance monitoring
*/
private startPerformanceMonitoring;
/**
* Stop performance monitoring
*/
stopMonitoring(): void;
/**
* Return the cached result if present; otherwise execute the operation and cache its result
*/
getCachedOrExecute<T>(cacheType: string, key: string, operation: () => Promise<T>, context?: MCPExecutionContext): Promise<T>;
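/**
 * Example: wrapping an expensive dump query so repeated identical requests
 * are served from cache. Illustrative sketch; it assumes an optimizer
 * instance named `optimizer`, and the cache type 'search_code' and the
 * searchIl2CppDump helper are hypothetical.
 *
 * @example
 * const results = await optimizer.getCachedOrExecute(
 *   'search_code',                       // hypothetical cache type
 *   `class:${className}`,                // cache key derived from the query
 *   () => searchIl2CppDump(className),   // executed only on a cache miss
 * );
 */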
/**
* Get cached result
*/
private getCachedResult;
/**
* Set cached result
*/
private setCachedResult;
/**
* Evict least recently used cache entries
*/
private evictLeastRecentlyUsed;
/**
* Get adaptive TTL based on usage patterns
*/
private getAdaptiveTtl;
/**
* Update cache statistics
*/
private updateCacheStats;
/**
* Record learning pattern for adaptive optimization
*/
private recordLearningPattern;
/**
* Collect performance metrics
*/
private collectPerformanceMetrics;
/**
* Detect performance bottlenecks
*/
private detectBottlenecks;
/**
* Optimize caches based on performance metrics and learning patterns
*/
private optimizeCaches;
/**
* Clean up expired cache entries
*/
private cleanupExpiredEntries;
/**
* Reduce a cache to its target size
*/
private reduceCacheSize;
/**
* Estimate size of cached data
*/
private estimateSize;
/**
* Get performance metrics
*/
getPerformanceMetrics(): PerformanceMetrics[];
/**
* Get cache statistics
*/
getCacheStatistics(): Map<string, CacheStats>;
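/**
 * Example: inspecting per-cache hit ratios at runtime. Illustrative sketch;
 * it assumes an optimizer instance named `optimizer` and that hitRatio is
 * reported as a 0-1 fraction.
 *
 * @example
 * for (const [cacheType, stats] of optimizer.getCacheStatistics()) {
 *   console.log(
 *     `${cacheType}: ${(stats.hitRatio * 100).toFixed(1)}% hits ` +
 *     `(${stats.hits}/${stats.totalRequests}), ${stats.evictions} evictions`,
 *   );
 * }
 */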
/**
* Get learning patterns
*/
getLearningPatterns(): Map<string, LearningPattern>;
/**
* Clear the cache for a specific type, or all caches when no type is given
*/
clearCache(cacheType?: string): void;
/**
* Get optimization recommendations based on current performance
*/
getOptimizationRecommendations(): string[];
/**
* Pre-warm cache with common queries
*/
preWarmCache(cacheType: string, queries: Array<{
key: string;
operation: () => Promise<any>;
}>): Promise<void>;
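/**
 * Example: pre-warming a cache with queries that are common for Unity
 * IL2CPP dumps. Illustrative sketch; the cache type and the
 * findMonoBehaviours/findClass helpers are hypothetical.
 *
 * @example
 * await optimizer.preWarmCache('search_code', [
 *   { key: 'monobehaviours', operation: () => findMonoBehaviours() },
 *   { key: 'class:GameManager', operation: () => findClass('GameManager') },
 * ]);
 */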
/**
* Export performance data for analysis
*/
exportPerformanceData(): {
metrics: PerformanceMetrics[];
cacheStats: Record<string, CacheStats>;
learningPatterns: Record<string, LearningPattern>;
config: OptimizationConfig;
};
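/**
 * Example: persisting a performance snapshot for offline analysis.
 * Illustrative sketch; the output path is an assumption. Because cacheStats
 * and learningPatterns are plain Records rather than Maps, the snapshot
 * serializes directly with JSON.stringify.
 *
 * @example
 * import { writeFile } from 'node:fs/promises';
 *
 * const snapshot = optimizer.exportPerformanceData();
 * await writeFile(
 *   './performance-snapshot.json',        // hypothetical output path
 *   JSON.stringify(snapshot, null, 2),
 * );
 */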
/**
* Cleanup resources and stop monitoring
*/
dispose(): void;
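/**
 * Example: lifecycle cleanup when the host MCP server shuts down.
 * Illustrative sketch; the SIGINT hook is an assumption about the host
 * process, not part of this module.
 *
 * @example
 * process.on('SIGINT', () => {
 *   optimizer.dispose(); // stops monitoring and cleans up resources
 *   process.exit(0);
 * });
 */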
}
export {};