agentjs-core
Version: (not specified in this listing)
A comprehensive agent-based modeling framework with built-in p5.js visualization.
101 lines • 2.68 kB
TypeScript declaration file (.d.ts)
import { MLAgent } from './MLAgent';
import { AgentAction, MLPerformanceMetrics } from './interfaces';
/**
 * Performance manager for ML inference optimization.
 *
 * Singleton that batches agent prediction requests to amortize per-call
 * inference cost: requests are queued with a priority, grouped by model
 * name, and flushed either when a batch fills or a timeout elapses
 * (see queuePrediction / scheduleBatchProcessing / processBatch below).
 *
 * NOTE(review): this is a declaration file — behavior notes here are
 * derived from the declared API surface; confirm against the
 * implementation in MLPerformanceManager.ts.
 */
export declare class MLPerformanceManager {
// Lazily created singleton instance returned by getInstance().
private static instance;
// Queued inference requests awaiting batch processing.
private inferenceQueue;
// Whether a batch is currently being processed (surfaced via getQueueStatus()).
private processingBatch;
// Number of requests processed per batch (tunable via configure()).
private batchSize;
// Upper bound on queued requests (tunable via configure()).
private maxQueueSize;
// Delay before flushing a partial batch — presumably milliseconds; confirm in implementation.
private batchTimeout;
// Handle for the pending batch-flush timer, if one is scheduled.
private timeoutId?;
// Accumulated performance metrics (exposed via getPerformanceMetrics()).
private metrics;
// Resolvers for promises handed out by queuePrediction() that are still in flight.
private pendingPromises;
// Private to enforce singleton access through getInstance().
private constructor();
/**
 * Get the singleton instance, creating it on first access.
 * @returns The shared MLPerformanceManager
 */
static getInstance(): MLPerformanceManager;
/**
 * Queue a prediction request for batch processing.
 * The returned promise settles when the request's batch is processed.
 * @param agent Agent requesting prediction
 * @param modelName Model to use for prediction
 * @param priority Request priority ('high' | 'normal' | 'low'); default unspecified here — presumably 'normal', confirm in implementation
 * @returns Promise resolving to the predicted action
 */
queuePrediction(agent: MLAgent, modelName: string, priority?: 'high' | 'normal' | 'low'): Promise<AgentAction>;
/**
 * Process a single prediction immediately, bypassing the queue
 * (and therefore any batching benefit).
 * @param agent Agent requesting prediction
 * @param modelName Model to use
 * @returns Promise resolving to the predicted action
 */
processImmediatePrediction(agent: MLAgent, modelName: string): Promise<AgentAction>;
/**
 * Add a request to the queue, ordered by its priority.
 */
private addToQueue;
/**
 * Schedule batch processing after the configured batchTimeout,
 * so partially filled batches are still flushed.
 */
private scheduleBatchProcessing;
/**
 * Process a batch of inference requests drained from the queue.
 */
private processBatch;
/**
 * Group queued requests by model name so each model runs one
 * batched inference call.
 */
private groupByModel;
/**
 * Run batched inference for all requests targeting one model.
 */
private processBatchForModel;
/**
 * Extract the observation/state vector used as model input
 * for a given agent.
 */
private getAgentState;
/**
 * Update internal performance metrics after processing.
 */
private updateMetrics;
/**
 * Get a snapshot of current performance metrics.
 * @returns Current MLPerformanceMetrics
 */
getPerformanceMetrics(): MLPerformanceMetrics;
/**
 * Get a snapshot of queue state for monitoring/debugging.
 * @returns Queue length, processing flag, configured batch size,
 *          and count of unresolved prediction promises
 */
getQueueStatus(): {
queueLength: number;
processing: boolean;
batchSize: number;
pendingPromises: number;
};
/**
 * Configure batch processing parameters. Omitted options are
 * presumably left unchanged — confirm in implementation.
 * @param options Partial overrides for batchSize, maxQueueSize, batchTimeout
 */
configure(options: {
batchSize?: number;
maxQueueSize?: number;
batchTimeout?: number;
}): void;
/**
 * Clear the inference queue. NOTE(review): whether pending promises
 * are rejected or left dangling is not visible here — verify.
 */
clearQueue(): void;
/**
 * Get memory usage information from the framework and the
 * TensorFlow backend. Both fields are typed `any` in this
 * declaration; their shape must be checked at runtime by callers.
 * @returns Framework- and TensorFlow-reported memory info
 */
getMemoryInfo(): {
framework: any;
tensorflow: any;
};
/**
 * Cleanup resources (timers, queued requests, model handles).
 * Call when the manager is no longer needed.
 */
dispose(): void;
}
//# sourceMappingURL=MLPerformanceManager.d.ts.map