/*
 * Package: anon-identity (version unknown at extraction time)
 * Decentralized identity framework with DIDs, Verifiable Credentials,
 * and privacy-preserving selective disclosure.
 * Extraction metadata: 187 lines, 5.16 kB, TypeScript.
 */
/**
* Unified LLM Communication Interface
*
* Provides a consistent API for LLM interactions across different providers
*/
import { EventEmitter } from 'events';
import { LLMRequest, LLMResponse, LLMResponseChunk, LLMRequestType, ConversationContext, FunctionDefinition, FunctionCall, FunctionResult, LLMParameters, RequestPriority, UsageInfo } from './types';
import { MCPClient } from './client';
/**
* Request builder for fluent API creation
*/
/**
 * Fluent builder for constructing an {@link LLMRequest}.
 *
 * A builder is bound to one agent (by DID) and one session at construction
 * time; the remaining request fields are filled in via chainable setters and
 * materialized with {@link LLMRequestBuilder.build}.
 */
export declare class LLMRequestBuilder {
private agentDID;
private sessionId;
private request;
/**
 * @param agentDID - DID of the agent the request is issued on behalf of.
 * @param sessionId - Conversation/session identifier the request belongs to.
 */
constructor(agentDID: string, sessionId: string);
/**
 * Set the prompt/content for the request.
 * @param content - Prompt text sent to the LLM.
 * @returns this, for chaining.
 */
prompt(content: string): this;
/**
 * Set the request type (completion, function call, streaming, etc. —
 * see {@link LLMRequestType} for the available kinds).
 * @returns this, for chaining.
 */
type(type: LLMRequestType): this;
/**
 * Attach conversation context (prior messages/state) to the request.
 * @returns this, for chaining.
 */
context(context: ConversationContext): this;
/**
 * Add function definitions the model may call (function-calling requests).
 * @returns this, for chaining.
 */
functions(functions: FunctionDefinition[]): this;
/**
 * Set LLM sampling/generation parameters (temperature, max tokens, …).
 * @returns this, for chaining.
 */
parameters(params: LLMParameters): this;
/**
 * Set the scheduling priority for this request.
 * @returns this, for chaining.
 */
priority(priority: RequestPriority): this;
/**
 * Add metadata tags for tracking/filtering of the request.
 * @returns this, for chaining.
 */
tags(tags: string[]): this;
/**
 * Build the final immutable request from the accumulated fields.
 *
 * NOTE(review): whether build() validates required fields (e.g. a missing
 * prompt) or throws is not visible from this declaration — confirm against
 * the implementation before relying on it.
 */
build(): LLMRequest;
}
/**
* Response analyzer for processing LLM responses
*/
/**
 * Static utility class for post-processing {@link LLMResponse} objects:
 * extracting function calls and structured data, and scoring response
 * quality. Holds no instance state — all members are static.
 * (Exported at the bottom of this file alongside ConversationContextManager.)
 */
declare class LLMResponseAnalyzer {
/**
 * Extract any function calls present in the response.
 * @returns The extracted calls; presumably an empty array when the
 *          response contains none — confirm against the implementation.
 */
static extractFunctionCalls(response: LLMResponse): FunctionCall[];
/**
 * Extract structured data (e.g. parsed JSON) from the response content.
 * @param schema - Optional schema to validate the extracted data against
 *                 (see the private validateAgainstSchema helper).
 * @returns The extracted data; shape depends on the response, hence `any`.
 */
static extractStructuredData(response: LLMResponse, schema?: any): any;
/**
 * Calculate heuristic quality metrics for a response.
 *
 * NOTE(review): the scale and meaning of each metric (e.g. 0–1 range) is
 * not visible from this declaration — verify in the implementation.
 */
static calculateQualityMetrics(response: LLMResponse): {
completeness: number;
coherence: number;
relevance: number;
confidence: number;
};
/**
 * Simple schema validation used by extractStructuredData.
 */
private static validateAgainstSchema;
}
/**
* Context manager for conversation state
*/
/**
 * Manages per-(agent DID, session) conversation state: lazily creates
 * contexts, appends messages, and compresses history when it grows beyond
 * a token budget (maxContextTokens).
 * (Exported at the bottom of this file alongside LLMResponseAnalyzer.)
 */
declare class ConversationContextManager {
private contexts;
private maxContextTokens;
/**
 * Get the context for the given agent/session, creating it if absent.
 */
getContext(agentDID: string, sessionId: string): ConversationContext;
/**
 * Append a message to the agent/session context.
 * @param role - Message author role; 'function' messages carry the
 *               optional functionCall/functionResult payloads.
 * @param functionCall - Call emitted by the assistant, when applicable.
 * @param functionResult - Result of executing a function call, when applicable.
 */
addMessage(agentDID: string, sessionId: string, role: 'user' | 'assistant' | 'system' | 'function', content: string, functionCall?: FunctionCall, functionResult?: FunctionResult): void;
/**
 * Compress context by summarizing older messages.
 * NOTE(review): summarization strategy is not visible here — confirm
 * whether it truncates, summarizes via the LLM, or both.
 */
private compressContext;
/**
 * Calculate approximate token count — "approximate" per the original
 * docs; presumably a heuristic rather than a tokenizer. Verify if exact
 * counts matter to callers.
 */
private calculateTokens;
/**
 * Remove the stored context for the given agent/session.
 */
clearContext(agentDID: string, sessionId: string): void;
/**
 * Get all stored contexts belonging to one agent (across sessions).
 */
getAgentContexts(agentDID: string): ConversationContext[];
}
/**
* Main unified LLM interface
*/
/**
 * Main facade for LLM interactions. Wraps an {@link MCPClient}, maintains
 * conversation state via a private ConversationContextManager, caches
 * responses with a TTL, and emits events (extends Node's EventEmitter —
 * the specific event names are set up in the private setupEventHandlers
 * and are not visible from this declaration).
 */
export declare class UnifiedLLMInterface extends EventEmitter {
private mcpClient;
private contextManager;
private requestCounter;
private responseCache;
private cacheTTL;
/**
 * @param mcpClient - Transport used to reach the underlying LLM providers.
 */
constructor(mcpClient: MCPClient);
/**
 * Create a new {@link LLMRequestBuilder} bound to the agent/session.
 */
createRequest(agentDID: string, sessionId: string): LLMRequestBuilder;
/**
 * Send a completion request.
 * @param options.useCache - Opt into the response cache; default behavior
 *        (opt-in vs opt-out) is not visible here — confirm in the
 *        implementation before relying on it.
 * @returns The provider's response.
 */
completion(agentDID: string, sessionId: string, prompt: string, options?: {
parameters?: LLMParameters;
providerId?: string;
priority?: RequestPriority;
useCache?: boolean;
}): Promise<LLMResponse>;
/**
 * Send a function-calling request with the given callable definitions.
 * @returns The raw response plus the function calls extracted from it.
 */
functionCall(agentDID: string, sessionId: string, prompt: string, functions: FunctionDefinition[], options?: {
parameters?: LLMParameters;
providerId?: string;
priority?: RequestPriority;
}): Promise<{
response: LLMResponse;
functionCalls: FunctionCall[];
}>;
/**
 * Send a streaming request; yields incremental response chunks.
 */
stream(agentDID: string, sessionId: string, prompt: string, options?: {
parameters?: LLMParameters;
providerId?: string;
priority?: RequestPriority;
}): AsyncIterable<LLMResponseChunk>;
/**
 * Get the conversation context for an agent/session (delegates to the
 * internal context manager, which creates it if absent).
 */
getContext(agentDID: string, sessionId: string): ConversationContext;
/**
 * Clear the conversation context for an agent/session.
 */
clearContext(agentDID: string, sessionId: string): void;
/**
 * Get usage statistics, optionally filtered to one provider.
 */
getUsageStats(providerId?: string): UsageInfo[];
/**
 * Run a health check across all providers.
 * @returns Map keyed by provider id; the value shape is untyped (`any`)
 *          in this declaration.
 */
healthCheck(): Promise<Map<string, any>>;
/**
 * Wire up internal event handlers (called during construction, presumably).
 */
private setupEventHandlers;
/**
 * Look up a cached response for a request, honoring cacheTTL.
 */
private getCachedResponse;
/**
 * Store a response in the cache.
 */
private cacheResponse;
/**
 * Derive the cache key for a request (exact fields used are not visible
 * from this declaration).
 */
private generateCacheKey;
/**
 * Evict cache entries older than cacheTTL.
 */
private cleanupCache;
}
export { LLMResponseAnalyzer, ConversationContextManager };
//# sourceMappingURL=interface.d.ts.map