/**
 * Package: @continue-reasoning/agent
 * A platform-agnostic AI agent framework for building autonomous AI agents
 * with tool execution capabilities.
 */
/**
* @fileoverview Token Usage Tracking Implementation
*
* This module provides real-time token consumption tracking for AI agents.
* It monitors input/output tokens, calculates cumulative usage, and provides
* warnings when approaching model limits.
*/
import { ITokenTracker, ITokenUsage } from './interfaces.js';
/**
* Real-time token usage tracker implementation
*
* This class tracks token consumption across conversations, providing:
* - Real-time usage updates
* - Cumulative tracking across sessions
* - Percentage-based limit warnings
* - Usage summaries for monitoring
*
* Key features:
* - Thread-safe operation
* - Model-specific token limits
* - Automatic percentage calculations
* - Reset capabilities for new sessions
*
* @example
* ```typescript
* const tracker = new TokenTracker('gemini-pro', 1000000);
* tracker.updateUsage({ inputTokens: 100, outputTokens: 50 });
*
* if (tracker.isApproachingLimit(0.8)) {
* console.log('Warning: 80% of token limit reached');
* }
* ```
*/
/**
 * Real-time token usage tracker implementation.
 *
 * This class tracks token consumption across conversations, providing:
 * - Real-time usage updates
 * - Cumulative tracking across sessions
 * - Percentage-based limit warnings
 * - Usage summaries for monitoring
 *
 * Key features:
 * - Model-specific token limits
 * - Automatic percentage calculations
 * - Reset capabilities for new sessions
 * - Session restoration via the constructor's `initialUsage` parameter
 *   and `exportUsageData()`
 *
 * NOTE(review): the original docs claimed "Thread-safe operation" — that
 * cannot be verified from this declaration (JS is single-threaded;
 * presumably it means safe under interleaved async calls). Confirm against
 * the implementation before relying on it.
 *
 * @example
 * ```typescript
 * const tracker = new TokenTracker('gemini-pro', 1000000);
 * tracker.updateUsage({ inputTokens: 100, outputTokens: 50 });
 *
 * if (tracker.isApproachingLimit(0.8)) {
 *   console.log('Warning: 80% of token limit reached');
 * }
 * ```
 */
export declare class TokenTracker implements ITokenTracker {
  /** Name of the AI model whose usage is tracked (e.g. 'gemini-pro'). */
  private readonly modelName;
  /** Maximum token budget for this model; basis for percentage calculations. */
  private readonly tokenLimit;
  /** Current token usage statistics */
  private currentUsage;
  /** Flag to track if the 90% warning has been logged (avoids repeat warnings). */
  private hasLoggedNinetyPercentWarning;
  /**
   * Constructor for TokenTracker
   *
   * @param modelName - Name of the AI model being tracked
   * @param tokenLimit - Maximum tokens allowed for this model
   * @param initialUsage - Initial usage state (for session restoration,
   *   e.g. from a previous `exportUsageData()` snapshot)
   */
  constructor(modelName: string, tokenLimit: number, initialUsage?: Partial<ITokenUsage>);
  /**
   * Update token usage with new consumption.
   *
   * This method is called after each API request to update the running
   * totals. It automatically recalculates percentages and cumulative usage.
   *
   * @param usage - New token usage to add to totals
   */
  updateUsage(usage: {
    inputTokens: number;
    outputTokens: number;
  }): void;
  /**
   * Get current token usage statistics.
   *
   * Returns a deep copy of the current usage state to prevent
   * external modifications.
   *
   * @returns Current token usage information
   */
  getUsage(): ITokenUsage;
  /**
   * Reset token tracking for a new session.
   *
   * Clears all usage counters while preserving the token limit.
   * Useful when starting a new conversation or session.
   */
  reset(): void;
  /**
   * Check if approaching the token limit.
   *
   * Compares current cumulative usage against the specified threshold
   * to provide early warnings before hitting model limits.
   *
   * @param threshold - Warning threshold as a fraction of the limit
   *   (0.0 to 1.0). Optional; the default value is defined in the
   *   implementation and is not visible from this declaration.
   * @returns True if approaching limit
   */
  isApproachingLimit(threshold?: number): boolean;
  /**
   * Get usage summary for debugging and monitoring.
   *
   * Provides a human-readable summary of current token usage,
   * useful for logging and debugging purposes.
   *
   * @returns Formatted usage summary string
   */
  getUsageSummary(): string;
  /**
   * Get detailed usage breakdown.
   *
   * Provides detailed usage metrics for advanced monitoring
   * and analytics purposes.
   *
   * NOTE(review): the exact warning/danger cutoffs behind
   * `isApproachingWarning` / `isApproachingDanger` are defined in the
   * implementation, not visible here — confirm before documenting values.
   *
   * @returns Detailed usage metrics object
   */
  getDetailedUsage(): {
    /** Raw cumulative usage counters. */
    basic: ITokenUsage;
    /** Derived per-request efficiency metrics. */
    efficiency: {
      outputRatio: number;
      averageInputPerRequest: number;
      averageOutputPerRequest: number;
    };
    /** Limit-related status flags and remaining-capacity estimates. */
    limits: {
      isApproachingWarning: boolean;
      isApproachingDanger: boolean;
      remainingTokens: number;
      estimatedRequestsRemaining: number;
    };
  };
  /**
   * Export usage data for persistence.
   *
   * Returns serializable usage data that can be saved and restored
   * later (via the constructor's `initialUsage` parameter) to maintain
   * usage tracking across sessions.
   *
   * @returns Serializable usage data snapshot, stamped with `timestamp`
   */
  exportUsageData(): {
    modelName: string;
    tokenLimit: number;
    usage: ITokenUsage;
    timestamp: number;
  };
  /**
   * Recalculate usage percentage.
   *
   * Updates the usage percentage based on current cumulative tokens
   * and the configured token limit.
   *
   * @private
   */
  private recalculatePercentage;
}
//# sourceMappingURL=tokenTracker.d.ts.map