erosolar-cli
Version: (not recorded in this extract)
Unified AI agent framework for the command line - Multi-provider support with schema-driven tools, code intelligence, and transparent reasoning
94 lines • 3.34 kB
TypeScript
/**
* Resilient Provider Wrapper
*
* Adds rate limiting, exponential backoff retry, and circuit breaker
* patterns to any LLM provider for maximum reliability and performance.
*
* PERF: Provider-agnostic wrapper that prevents rate limit errors and
* automatically recovers from transient failures.
*/
import type { LLMProvider, ConversationMessage, ProviderToolDefinition, ProviderResponse, StreamChunk, ProviderId } from '../core/types.js';
/**
 * Configuration for {@link ResilientProvider}.
 *
 * All fields are optional; the defaults noted below are applied by the
 * implementation (not visible from this declaration file).
 */
export interface ResilientProviderConfig {
/** Maximum requests per one-minute window (default: 50). */
maxRequestsPerMinute?: number;
/** Maximum retry attempts per request (default: 4) */
maxRetries?: number;
/** Base delay for exponential backoff in ms (default: 1000) */
baseDelayMs?: number;
/** Maximum delay between retries in ms (default: 32000) */
maxDelayMs?: number;
/** Enable circuit breaker pattern (default: true) */
enableCircuitBreaker?: boolean;
/** Number of failures before circuit opens (default: 5) */
circuitBreakerThreshold?: number;
/** Time before an open circuit resets in ms (default: 60000) */
circuitBreakerResetMs?: number;
}
/**
 * Wraps any LLM provider with rate limiting and retry logic.
 *
 * Implements {@link LLMProvider}, so an instance is a drop-in replacement
 * for the provider it wraps.
 */
export declare class ResilientProvider implements LLMProvider {
/** Identifier of this provider. */
readonly id: ProviderId;
/** Model name in use. */
readonly model: string;
/** The wrapped provider that requests are delegated to. */
private readonly provider;
/** Rate limiter guarding outgoing requests (token-based; see getStats().availableTokens). */
private readonly rateLimiter;
/** Effective resilience configuration for this instance. */
private readonly config;
/** Circuit breaker state. */
private readonly circuitBreaker;
/** Mutable counters surfaced by getStats() and cleared by resetStats(). */
private stats;
/**
 * @param provider - Provider to wrap; all requests are delegated to it.
 * @param config - Optional resilience settings; see
 *   {@link ResilientProviderConfig} for the documented defaults.
 */
constructor(provider: LLMProvider, config?: ResilientProviderConfig);
/**
 * Check and potentially reset circuit breaker
 * (e.g. re-close after the reset window elapses — confirm in implementation).
 */
private checkCircuitBreaker;
/**
 * Record a failure for circuit breaker
 */
private recordFailure;
/**
 * Record a success to reset circuit breaker
 */
private recordSuccess;
/**
 * Execute a request with rate limiting and retry
 */
private executeWithResilience;
/**
 * Generate a response with resilience (rate limiting, retry with
 * exponential backoff, and circuit breaking, per the configuration).
 *
 * @param messages - Conversation history to send to the provider.
 * @param tools - Tool definitions made available to the model.
 * @returns The wrapped provider's response.
 */
generate(messages: ConversationMessage[], tools: ProviderToolDefinition[]): Promise<ProviderResponse>;
/**
 * Generate a streaming response with resilience
 *
 * Note: Retry logic is limited for streaming - we can only retry
 * before the stream starts, not mid-stream.
 *
 * @param messages - Conversation history to send to the provider.
 * @param tools - Tool definitions made available to the model.
 */
generateStream(messages: ConversationMessage[], tools: ProviderToolDefinition[]): AsyncIterableIterator<StreamChunk>;
/**
 * Get resilience statistics accumulated since construction or the last
 * resetStats() call.
 */
getStats(): {
/** Total requests attempted through this wrapper. */
totalRequests: number;
/** Times the rate limiter throttled a request. */
rateLimitHits: number;
/** Total retry attempts performed. */
retries: number;
/** Times the circuit breaker tripped open. */
circuitBreakerTrips: number;
/** Whether the circuit is currently open (requests short-circuited). */
circuitBreakerOpen: boolean;
/** Tokens currently available in the rate limiter. */
availableTokens: number;
};
/**
 * Reset statistics
 *
 * NOTE(review): whether this also closes an open circuit is not visible
 * from this declaration — confirm in the implementation.
 */
resetStats(): void;
}
/**
 * Wrap any provider with resilience features.
 *
 * Convenience factory — presumably equivalent to
 * `new ResilientProvider(provider, config)`; confirm against the
 * implementation.
 *
 * @param provider - Provider to wrap.
 * @param config - Optional resilience settings (see ResilientProviderConfig).
 * @returns The wrapping ResilientProvider.
 */
export declare function withResilience(provider: LLMProvider, config?: ResilientProviderConfig): ResilientProvider;
/**
 * Provider-specific recommended configurations.
 *
 * Keyed by provider id string; the exact keys and values are defined in
 * the implementation and are not visible from this declaration file.
 * Consumed by {@link withProviderResilience} — presumably as the source of
 * its per-provider defaults; confirm in the implementation.
 */
export declare const PROVIDER_RESILIENCE_CONFIGS: Record<string, ResilientProviderConfig>;
/**
 * Wrap a provider with resilience using provider-specific defaults.
 *
 * NOTE(review): presumably looks up defaults for `providerId` in
 * PROVIDER_RESILIENCE_CONFIGS and applies `overrides` on top — confirm
 * the merge order in the implementation.
 *
 * @param provider - Provider to wrap.
 * @param providerId - Key selecting the provider-specific default config.
 * @param overrides - Settings that take precedence over the defaults.
 * @returns The wrapping ResilientProvider.
 */
export declare function withProviderResilience(provider: LLMProvider, providerId: string, overrides?: Partial<ResilientProviderConfig>): ResilientProvider;
//# sourceMappingURL=resilientProvider.d.ts.map