promptforge
Version:
Adaptive Prompt Intelligence & Orchestration SDK - Manage, optimize, and serve prompts for LLMs with versioning, feedback loops, and multi-provider support
46 lines • 1.11 kB
TypeScript
import { CacheEntry, CacheConfig, LLMProvider } from '../types.js';
export declare class SemanticCache {
/** Backing store for cached entries (concrete type not visible in this declaration file). */
private cache;
/** Effective cache configuration — presumably constructor arg merged over defaults; confirm in cache.ts. */
private config;
/**
 * Create a semantic cache.
 *
 * @param config - Optional partial configuration; omitted fields presumably
 *   fall back to library defaults (implementation not visible here — confirm
 *   in cache.ts).
 */
constructor(config?: Partial<CacheConfig>);
/**
 * Look up a cached result for a prompt/input pair.
 *
 * @param promptId - Identifier of the prompt being rendered.
 * @param input - String variable bindings for the prompt.
 * @returns The matching cache entry, or `null` on a cache miss.
 */
get(promptId: string, input: Record<string, string>): Promise<CacheEntry | null>;
/**
 * Store an LLM output for a prompt/input pair.
 *
 * @param promptId - Identifier of the prompt being rendered.
 * @param input - String variable bindings used to produce `output`.
 * @param output - The LLM response text to cache.
 * @param metadata - Provider and model that produced `output`.
 */
set(promptId: string, input: Record<string, string>, output: string, metadata: {
provider: LLMProvider;
model: string;
}): Promise<void>;
/**
 * Invalidate all cached entries for a prompt.
 *
 * @param promptId - Identifier of the prompt whose entries are dropped.
 * @returns A count — presumably the number of entries removed; confirm in cache.ts.
 */
invalidate(promptId: string): Promise<number>;
/**
 * Remove every entry from the cache.
 */
clear(): Promise<void>;
/**
 * Snapshot of cache statistics.
 *
 * @returns `size` (entry count), `totalHits`, and `hitRate`
 *   (presumably hits / lookups in [0, 1] — confirm in cache.ts).
 */
getStats(): {
size: number;
totalHits: number;
hitRate: number;
};
/**
 * Hash an input map into a key for exact matching.
 * NOTE(review): despite the class name "SemanticCache", this comment says the
 * lookup key is an exact hash, not a similarity measure — verify in cache.ts.
 */
private hashInput;
/**
 * Evict the oldest entries — presumably when the configured capacity is
 * exceeded; eviction policy not visible in this declaration file.
 */
private evictOldest;
/**
 * Remove expired entries.
 *
 * @returns A count — presumably the number of expired entries removed; confirm in cache.ts.
 */
cleanup(): Promise<number>;
}
//# sourceMappingURL=cache.d.ts.map