/*
 * @ooples/token-optimizer-mcp (version unspecified)
 * Intelligent context-window optimization for Claude Code — store content
 * externally via caching and compression, freeing up your context window
 * for what matters.
 * Package listing metadata: 494 lines • 14.3 kB • TypeScript.
 */
/**
* Cache Replication - 88% token reduction through distributed cache coordination
*
* Features:
* - Multiple replication modes (primary-replica, multi-primary, eventual/strong consistency)
* - Automatic conflict resolution (last-write-wins, merge, custom)
* - Automatic failover with replica promotion
* - Incremental sync with delta transmission
* - Health monitoring and lag tracking
* - Regional replication support
* - Write quorum for strong consistency
* - Vector clock-based conflict resolution
*
* Token Reduction Strategy:
* - Compressed replication logs (92% reduction)
* - Delta-based sync transmission (94% reduction)
* - Metadata deduplication (89% reduction)
* - State snapshots with incremental updates (91% reduction)
*/
import { EventEmitter } from 'events';
import { CacheEngine } from '../../core/cache-engine.js';
import { TokenCounter } from '../../core/token-counter.js';
import { MetricsCollector } from '../../core/metrics.js';
/**
 * Replication topology modes.
 *
 * NOTE(review): 'master-slave' and 'peer-to-peer' look like legacy aliases for
 * 'primary-replica' and 'multi-primary' respectively — confirm against the
 * implementation before relying on distinct behavior.
 */
export type ReplicationMode = 'primary-replica' | 'multi-primary' | 'master-slave' | 'peer-to-peer';
/**
 * Consistency models governing when replicas observe writes.
 */
export type ConsistencyModel = 'eventual' | 'strong' | 'causal';
/**
 * Strategies for resolving concurrent conflicting writes.
 * 'custom' requires a customResolver callback (see CacheReplicationOptions).
 */
export type ConflictResolution = 'last-write-wins' | 'first-write-wins' | 'merge' | 'custom' | 'vector-clock';
/**
 * Health classification of a replica node.
 */
export type NodeHealth = 'healthy' | 'degraded' | 'unhealthy' | 'offline';
/**
 * Operations accepted by CacheReplicationTool.run().
 */
export type ReplicationOperation = 'configure' | 'add-replica' | 'remove-replica' | 'promote-replica' | 'sync' | 'status' | 'health-check' | 'resolve-conflicts' | 'snapshot' | 'restore' | 'rebalance';
/**
 * Replica node information tracked by the replication coordinator.
 */
export interface ReplicaNode {
  /** Unique node identifier. */
  id: string;
  /** Deployment region this node serves. */
  region: string;
  /** Network endpoint URL for the node. */
  endpoint: string;
  /** True when this node accepts writes as a primary. */
  isPrimary: boolean;
  /** Current health classification. */
  health: NodeHealth;
  /** Time of the last received heartbeat (presumably epoch ms — TODO confirm units). */
  lastHeartbeat: number;
  /** Replication lag behind the primary (presumably ms; verify against implementation). */
  lag: number;
  /** Replication log version this node has applied. */
  version: number;
  /** Vector clock recording this node's causal history. */
  vectorClock: VectorClock;
  /** Relative weight for load balancing (see rebalance). */
  weight: number;
  /** Total storage capacity (units not specified in this declaration). */
  capacity: number;
  /** Storage currently used (same units as capacity). */
  used: number;
}
/**
 * Vector clock for causal consistency.
 *
 * Maps a node ID to that node's logical event counter (incremented via
 * incrementVectorClock, compared via happenedBefore, combined via
 * mergeVectorClocks on CacheReplicationTool).
 *
 * Fix: the original declaration read `[]: number`, which is not valid
 * TypeScript — an index signature requires a named key parameter.
 */
export interface VectorClock {
  /** Logical event counter, keyed by node ID. */
  [nodeId: string]: number;
}
/**
 * A single mutation recorded in the replication log.
 */
export interface ReplicationEntry {
  /** Cache key the mutation applies to. */
  key: string;
  /** Payload for 'set' operations. Declared `any` in the public API; narrow before use (changing to `unknown` would break existing consumers). */
  value: any;
  /** Mutation kind. */
  operation: 'set' | 'delete';
  /** Time of the write (presumably epoch ms — TODO confirm units). */
  timestamp: number;
  /** Replication log version this entry belongs to. */
  version: number;
  /** Causal history of the write; used by 'vector-clock' conflict resolution. */
  vectorClock: VectorClock;
  /** ID of the node that originated the write. */
  nodeId: string;
  /** Integrity checksum of the entry (algorithm not visible in this declaration). */
  checksum: string;
}
/**
 * Delta payload for incremental (version-ranged) sync between nodes.
 */
export interface SyncDelta {
  /** Entries spanning fromVersion..toVersion (bound inclusivity not visible here — confirm against createSyncDelta). */
  entries: ReplicationEntry[];
  /** Version the receiver is assumed to already have. */
  fromVersion: number;
  /** Version the receiver reaches after applying the delta. */
  toVersion: number;
  /** True when the entries payload is compressed. */
  compressed: boolean;
  /** Payload size (presumably bytes — verify). */
  size: number;
  /** Integrity checksum of the delta payload. */
  checksum: string;
}
/**
 * A detected write conflict between two nodes for the same key.
 */
export interface Conflict {
  /** Cache key both entries wrote. */
  key: string;
  /** This node's version of the entry. */
  localEntry: ReplicationEntry;
  /** The remote node's version of the entry. */
  remoteEntry: ReplicationEntry;
  /** Winning entry, present once the conflict has been resolved. */
  resolution?: ReplicationEntry;
  /** Strategy that produced `resolution`, when resolved. */
  resolvedBy?: ConflictResolution;
  /** When the conflict was detected (presumably epoch ms — TODO confirm). */
  timestamp: number;
}
/**
 * Replication configuration (set via the 'configure' operation).
 */
export interface ReplicationConfig {
  /** Replication topology. */
  mode: ReplicationMode;
  /** Consistency model. */
  consistency: ConsistencyModel;
  /** Default conflict-resolution strategy. */
  conflictResolution: ConflictResolution;
  /** Sync interval in milliseconds. */
  syncInterval: number;
  /** Heartbeat interval in milliseconds. */
  heartbeatInterval: number;
  /** Health-check interval (presumably ms, consistent with the other intervals — confirm). */
  healthCheckInterval: number;
  /** Maximum tolerated replica lag (presumably ms) — exact semantics not visible here. */
  maxLag: number;
  /** Number of replicas required to acknowledge a write. */
  writeQuorum: number;
  /** Number of replicas required to serve a read. */
  readQuorum: number;
  /** Compress replication payloads. */
  enableCompression: boolean;
  /** Use delta-based (incremental) sync rather than full sync. */
  enableDelta: boolean;
  /** Interval between automatic snapshots (presumably ms — confirm). */
  snapshotInterval: number;
  /** Retention window for old snapshots (see cleanupOldSnapshots) — TODO confirm units. */
  retentionPeriod: number;
}
/**
 * Health check results for a single node.
 */
export interface HealthCheckResult {
  /** Node the check was run against. */
  nodeId: string;
  /** Health classification derived from the check. */
  health: NodeHealth;
  /** Replication lag observed at check time. */
  lag: number;
  /** Timestamp of the node's last successful sync. */
  lastSync: number;
  /** Errors observed during the check. */
  errors: string[];
  /** Non-fatal warnings observed during the check. */
  warnings: string[];
  /** Point-in-time node metrics (units not specified in this declaration). */
  metrics: {
    throughput: number;
    latency: number;
    errorRate: number;
    uptime: number;
  };
}
/**
 * Aggregate replication statistics across the cluster (returned by 'status').
 */
export interface ReplicationStats {
  /** Configured replication topology. */
  mode: ReplicationMode;
  /** Configured consistency model. */
  consistency: ConsistencyModel;
  /** Total known nodes. */
  totalNodes: number;
  /** Nodes currently reporting 'healthy'. */
  healthyNodes: number;
  /** Count of primary nodes. */
  primaryNodes: number;
  /** Count of replica (non-primary) nodes. */
  replicaNodes: number;
  /** Entries in the replication log. */
  totalEntries: number;
  /** Entries confirmed replicated. */
  syncedEntries: number;
  /** Entries awaiting replication. */
  pendingEntries: number;
  /** Conflicts detected so far. */
  conflicts: number;
  /** Conflicts already resolved. */
  resolvedConflicts: number;
  /** Mean lag across nodes. */
  averageLag: number;
  /** Worst lag across nodes. */
  maxLag: number;
  /** Cluster throughput (units not specified here). */
  throughput: number;
  /** Distinct regions represented by the nodes. */
  regions: string[];
  /** Latest per-node health results. */
  healthChecks: HealthCheckResult[];
}
/**
 * Metadata describing a stored snapshot.
 */
export interface SnapshotMetadata {
  /** Snapshot identifier (see generateSnapshotId). */
  id: string;
  /** Replication log version captured by the snapshot. */
  version: number;
  /** When the snapshot was taken (presumably epoch ms — TODO confirm). */
  timestamp: number;
  /** Node that produced the snapshot. */
  nodeId: string;
  /** Number of cache entries captured. */
  entryCount: number;
  /** Serialized size (presumably bytes — verify). */
  size: number;
  /** True when the snapshot data is stored compressed. */
  compressed: boolean;
  /** Integrity checksum of the snapshot data. */
  checksum: string;
}
/**
 * Options for CacheReplicationTool.run(). Only `operation` is required; the
 * remaining fields apply to the specific operations noted per field.
 */
export interface CacheReplicationOptions {
  /** Operation to perform. */
  operation: ReplicationOperation;
  /** Replication mode (configure). */
  mode?: ReplicationMode;
  /** Consistency model (configure). */
  consistency?: ConsistencyModel;
  /** Conflict resolution strategy (configure). */
  conflictResolution?: ConflictResolution;
  /** Sync interval in milliseconds (configure). */
  syncInterval?: number;
  /** Heartbeat interval in milliseconds (configure). */
  heartbeatInterval?: number;
  /** Replicas required to acknowledge a write (configure). */
  writeQuorum?: number;
  /** Replicas required to serve a read (configure). */
  readQuorum?: number;
  /** Compress replication payloads (configure). */
  enableCompression?: boolean;
  /** Node ID (add-replica / remove-replica). */
  nodeId?: string;
  /** Region name (add-replica). */
  region?: string;
  /** Node endpoint URL (add-replica). */
  endpoint?: string;
  /** Node weight for load balancing (add-replica). */
  weight?: number;
  /** Target node ID (promote-replica). */
  targetNodeId?: string;
  /** Force sync even if up-to-date (sync). */
  force?: boolean;
  /** Sync only delta changes (sync). */
  deltaOnly?: boolean;
  /** Pre-collected conflicts to resolve (resolve-conflicts). */
  conflicts?: Conflict[];
  /** Resolver callback; used with conflictResolution 'custom'. */
  customResolver?: (conflict: Conflict) => ReplicationEntry;
  /** Snapshot ID (restore). */
  snapshotId?: string;
  /** Include snapshot data in response (snapshot). */
  includeMetadata?: boolean;
  /** Enable result caching (default: true). */
  useCache?: boolean;
  /** Cache TTL in seconds (default: 300). */
  cacheTTL?: number;
}
/**
 * Result envelope returned by every replication operation.
 */
export interface CacheReplicationResult {
  /** Whether the operation succeeded. */
  success: boolean;
  /** Echo of the operation performed. */
  operation: ReplicationOperation;
  /** Operation-specific payload. NOTE(review): which fields are populated per operation is not visible from this declaration — confirm in the implementation. */
  data: {
    config?: ReplicationConfig;
    nodes?: ReplicaNode[];
    stats?: ReplicationStats;
    delta?: SyncDelta;
    conflicts?: Conflict[];
    snapshot?: {
      metadata: SnapshotMetadata;
      /** Serialized snapshot payload. */
      data: string;
    };
    healthChecks?: HealthCheckResult[];
  };
  /** Token accounting and timing metadata. */
  metadata: {
    tokensUsed: number;
    tokensSaved: number;
    /** True when the result was served from the result cache. */
    cacheHit: boolean;
    /** Wall-clock execution time (presumably ms — TODO confirm). */
    executionTime: number;
    nodesAffected?: number;
    entriesSynced?: number;
  };
}
/**
 * Cache Replication Tool - Distributed cache coordination.
 *
 * Extends EventEmitter; the events emitted are not visible from this
 * declaration — consult the implementation for the event contract.
 */
export declare class CacheReplicationTool extends EventEmitter {
  /** Underlying cache engine (from constructor). */
  private cache;
  /** Token counting service (from constructor). */
  private tokenCounter;
  /** Metrics collector (from constructor). */
  private metrics;
  /** Active replication configuration. */
  private config;
  /** Known replica nodes (presumably keyed by node ID — confirm). */
  private nodes;
  /** Log of replication entries. */
  private replicationLog;
  /** Current replication log version. */
  private currentVersion;
  /** This node's vector clock. */
  private vectorClock;
  /** Conflicts detected but not yet resolved. */
  private pendingConflicts;
  /** Stored snapshots (presumably keyed by snapshot ID — confirm). */
  private snapshots;
  /** Background timer handle for periodic sync. */
  private syncTimer;
  /** Background timer handle for heartbeats. */
  private heartbeatTimer;
  /** Background timer handle for health checks. */
  private healthCheckTimer;
  /** Background timer handle for periodic snapshots. */
  private snapshotTimer;
  /** Running replication statistics. */
  private stats;
  constructor(cache: CacheEngine, tokenCounter: TokenCounter, metrics: MetricsCollector, nodeId?: string);
  /**
   * Main entry point for replication operations.
   * Dispatches on options.operation; see CacheReplicationOptions for which
   * fields each operation uses.
   */
  run(options: CacheReplicationOptions): Promise<CacheReplicationResult>;
  /**
   * Configure replication settings
   */
  private configure;
  /**
   * Add replica node
   */
  private addReplica;
  /**
   * Remove replica node
   */
  private removeReplica;
  /**
   * Promote replica to primary
   */
  private promoteReplica;
  /**
   * Synchronize with replicas
   */
  private sync;
  /**
   * Get replication status
   */
  private getStatus;
  /**
   * Perform health check on all nodes
   */
  private healthCheck;
  /**
   * Resolve conflicts
   */
  private resolveConflicts;
  /**
   * Create snapshot
   */
  private createSnapshot;
  /**
   * Restore from snapshot
   */
  private restore;
  /**
   * Rebalance load across replicas
   */
  private rebalance;
  /**
   * Start background tasks
   */
  private startBackgroundTasks;
  /**
   * Restart sync timer
   */
  private restartSyncTimer;
  /**
   * Restart heartbeat timer
   */
  private restartHeartbeatTimer;
  /**
   * Restart health check timer
   */
  private restartHealthCheckTimer;
  /**
   * Restart snapshot timer
   */
  private restartSnapshotTimer;
  /**
   * Send heartbeats to all nodes
   */
  private sendHeartbeats;
  /**
   * Create sync delta
   */
  private createSyncDelta;
  /**
   * Sync with specific node
   */
  private syncNode;
  /**
   * Apply replication entry
   */
  private applyReplicationEntry;
  /**
   * Resolve conflict using vector clocks
   */
  private resolveWithVectorClock;
  /**
   * Check if clock1 happened before clock2
   */
  private happenedBefore;
  /**
   * Merge conflicting entries
   */
  private mergeConflict;
  /**
   * Merge vector clocks
   */
  private mergeVectorClocks;
  /**
   * Increment vector clock for node
   */
  private incrementVectorClock;
  /**
   * Get primary node
   */
  private getPrimaryNode;
  /**
   * Get statistics snapshot
   */
  private getStatsSnapshot;
  /**
   * Calculate node throughput
   */
  private calculateNodeThroughput;
  /**
   * Generate snapshot ID
   */
  private generateSnapshotId;
  /**
   * Clean up old snapshots
   */
  private cleanupOldSnapshots;
  /**
   * Calculate checksum
   */
  private calculateChecksum;
  /**
   * Compress data
   */
  private compressData;
  /**
   * Decompress data
   */
  private decompressData;
  /**
   * Check if operation is cacheable
   */
  private isCacheableOperation;
  /**
   * Get cache key parameters
   */
  private getCacheKeyParams;
  /**
   * Cleanup and dispose — presumably stops the background sync/heartbeat/
   * health-check/snapshot timers; confirm against the implementation.
   */
  dispose(): void;
}
/**
 * Get a CacheReplicationTool bound to the given cache, token counter, and
 * metrics collector. NOTE(review): whether this returns a shared singleton or
 * a fresh instance per call is not visible from this declaration — confirm.
 */
export declare function getCacheReplicationTool(cache: CacheEngine, tokenCounter: TokenCounter, metrics: MetricsCollector, nodeId?: string): CacheReplicationTool;
/**
 * MCP Tool Definition for the `cache_replication` tool.
 *
 * The inputSchema mirrors CacheReplicationOptions; `operation` is the only
 * required property. All members are `readonly` literals so the schema is
 * fully typed at compile time.
 */
export declare const CACHE_REPLICATION_TOOL_DEFINITION: {
  readonly name: "cache_replication";
  readonly description: "Distributed cache replication with 88%+ token reduction. Supports primary-replica and multi-primary modes, strong/eventual consistency, automatic conflict resolution, failover, incremental sync, and health monitoring.";
  readonly inputSchema: {
    readonly type: "object";
    readonly properties: {
      readonly operation: {
        readonly type: "string";
        readonly enum: readonly ["configure", "add-replica", "remove-replica", "promote-replica", "sync", "status", "health-check", "resolve-conflicts", "snapshot", "restore", "rebalance"];
        readonly description: "Replication operation to perform";
      };
      readonly mode: {
        readonly type: "string";
        readonly enum: readonly ["primary-replica", "multi-primary", "master-slave", "peer-to-peer"];
        readonly description: "Replication mode (for configure operation)";
      };
      readonly consistency: {
        readonly type: "string";
        readonly enum: readonly ["eventual", "strong", "causal"];
        readonly description: "Consistency model (for configure operation)";
      };
      readonly conflictResolution: {
        readonly type: "string";
        readonly enum: readonly ["last-write-wins", "first-write-wins", "merge", "custom", "vector-clock"];
        readonly description: "Conflict resolution strategy (for configure operation)";
      };
      readonly syncInterval: {
        readonly type: "number";
        readonly description: "Sync interval in milliseconds (for configure operation)";
      };
      readonly heartbeatInterval: {
        readonly type: "number";
        readonly description: "Heartbeat interval in milliseconds (for configure operation)";
      };
      readonly writeQuorum: {
        readonly type: "number";
        readonly description: "Number of replicas required for writes (for configure operation)";
      };
      readonly readQuorum: {
        readonly type: "number";
        readonly description: "Number of replicas required for reads (for configure operation)";
      };
      readonly nodeId: {
        readonly type: "string";
        readonly description: "Node ID (for add-replica/remove-replica operations)";
      };
      readonly region: {
        readonly type: "string";
        readonly description: "Region name (for add-replica operation)";
      };
      readonly endpoint: {
        readonly type: "string";
        readonly description: "Node endpoint URL (for add-replica operation)";
      };
      readonly weight: {
        readonly type: "number";
        readonly description: "Node weight for load balancing (for add-replica operation)";
      };
      readonly targetNodeId: {
        readonly type: "string";
        readonly description: "Target node ID (for promote-replica operation)";
      };
      readonly force: {
        readonly type: "boolean";
        readonly description: "Force sync even if up-to-date (for sync operation)";
      };
      readonly deltaOnly: {
        readonly type: "boolean";
        readonly description: "Sync only delta changes (for sync operation)";
      };
      readonly snapshotId: {
        readonly type: "string";
        readonly description: "Snapshot ID (for restore operation)";
      };
      readonly includeMetadata: {
        readonly type: "boolean";
        readonly description: "Include snapshot data in response (for snapshot operation)";
      };
      readonly useCache: {
        readonly type: "boolean";
        readonly description: "Enable result caching (default: true)";
        readonly default: true;
      };
      readonly cacheTTL: {
        readonly type: "number";
        readonly description: "Cache TTL in seconds (default: 300)";
        readonly default: 300;
      };
    };
    readonly required: readonly ["operation"];
  };
};
/**
 * Convenience runner: executes a single replication operation against the
 * given cache/tokenCounter/metrics and resolves with the operation result.
 * Equivalent to obtaining a tool via getCacheReplicationTool and calling
 * run(options) — TODO confirm in the implementation.
 */
export declare function runCacheReplication(options: CacheReplicationOptions, cache: CacheEngine, tokenCounter: TokenCounter, metrics: MetricsCollector, nodeId?: string): Promise<CacheReplicationResult>;
//# sourceMappingURL=cache-replication.d.ts.map