@polybiouslabs/polybious
Polybius is a next-generation intelligent agent framework built for adaptability across diverse domains. It merges contextual awareness, multi-agent collaboration, and predictive reasoning to deliver dynamic, self-optimizing performance.
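The TypeScript declarations below describe the framework's explainability layer: reasoning traces for decisions, prediction explanations, reasoning paths, action-sequence explanations, and aggregated insights. A minimal import sketch, assuming these types are re-exported from the package root:

// npm install @polybiouslabs/polybious
import { ExplainableAI } from '@polybiouslabs/polybious';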
export interface ReasoningTrace {
  id: string;
  timestamp: Date;
  decision?: any;
  context: any;
  evidence?: any;
  explanation?: Explanation;
  confidence: number;
  reasoning_chain: ReasoningStep[];
  alternative_paths: AlternativePath[];
}
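// A minimal ReasoningTrace literal, sketched as an illustration of the shape
// above. Treating `confidence` as a 0..1 score is our assumption; the
// declarations only require `number`.
const sampleTrace: ReasoningTrace = {
  id: 'trace-001',
  timestamp: new Date(),
  context: { domain: 'routing' },
  confidence: 0.82,
  reasoning_chain: [],
  alternative_paths: [],
};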
export interface Explanation {
  summary: string;
  detailed_reasoning: DetailedReasoning[];
  supporting_evidence: EvidenceItem[];
  confidence_explanation: ConfidenceFactor[];
  risk_factors: string[];
  assumptions_made: string[];
  template_used: string;
}
export interface DetailedReasoning {
  step: string;
  description: string;
  factors?: string[];
  key_evidence?: string[];
  logic?: string;
  alternatives_count?: number;
  impact: 'low' | 'medium' | 'high' | 'critical';
}
export interface EvidenceItem {
  factor: string;
  value: any;
  reliability: number;
  importance: number;
  source: string;
  description: string;
}
export interface ConfidenceFactor {
  factor: string;
  score: number;
  description: string;
}
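// Illustrative Explanation value wiring the three supporting shapes together.
// The example values, the template name, and reading reliability/importance
// as 0..1 weights are all assumptions, not documented semantics.
const sampleExplanation: Explanation = {
  summary: 'Chose route A: shortest ETA with acceptable risk.',
  detailed_reasoning: [{
    step: 'evaluate-candidates',
    description: 'Scored each candidate route on ETA and risk.',
    impact: 'high',
  }],
  supporting_evidence: [{
    factor: 'eta_minutes',
    value: 12,
    reliability: 0.9,
    importance: 0.8,
    source: 'traffic-feed',
    description: 'Live ETA from the traffic feed.',
  }],
  confidence_explanation: [{
    factor: 'evidence_freshness',
    score: 0.85,
    description: 'Evidence is under five minutes old.',
  }],
  risk_factors: ['traffic feed latency'],
  assumptions_made: ['traffic conditions stay stable'],
  template_used: 'decision_default', // hypothetical template name
};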
export interface ReasoningStep {
  step: number;
  type: string;
  description: string;
  state?: any;
  inputs?: string[];
  outputs?: any;
}
export interface AlternativePath {
  alternative: any;
  feasibility: number;
  expected_outcome: any;
  trade_offs: string[];
  risk_level: number;
  why_not_chosen: string;
}
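// Sketch of a two-step chain plus one rejected alternative. The `type`
// vocabulary ('observe', 'decide') is hypothetical; the declarations do not
// constrain it beyond `string`.
const chain: ReasoningStep[] = [
  { step: 1, type: 'observe', description: 'Collected candidate routes.', inputs: ['traffic-feed'] },
  { step: 2, type: 'decide', description: 'Selected route A on lowest ETA.' },
];
const rejected: AlternativePath = {
  alternative: { route: 'B' },
  feasibility: 0.7,
  expected_outcome: { eta_minutes: 18 },
  trade_offs: ['longer ETA', 'lower toll cost'],
  risk_level: 0.2,
  why_not_chosen: 'ETA 50% higher than route A.',
};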
export interface PredictionExplanation {
  id: string;
  timestamp: Date;
  prediction: any;
  inputData: any;
  model_info: any;
  feature_importance: FeatureImportance[];
  confidence_factors: ConfidenceFactor[];
  sensitivity_analysis: SensitivityAnalysis[];
  example_influences: SimilarExample[];
  human_readable_explanation: string;
}
export interface FeatureImportance {
  feature: string;
  value: any;
  importance: number;
  influence: number;
  explanation: string;
}
export interface SensitivityAnalysis {
  feature: string;
  baseline_value: number;
  variations: Array<{
    change: number;
    new_value: number;
    predicted_impact: number;
  }>;
  sensitivity_score: number;
}
export interface SimilarExample {
  decision_id: string;
  similarity: number;
  outcome: any;
  timestamp: Date;
}
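// Hedged helpers showing how the prediction shapes compose: rank features by
// absolute influence, and surface the most volatile inputs. The threshold is
// arbitrary, and treating higher sensitivity_score as "more volatile" is an
// assumption from the field name.
function topDrivers(exp: PredictionExplanation, n = 3): FeatureImportance[] {
  return [...exp.feature_importance]
    .sort((a, b) => Math.abs(b.influence) - Math.abs(a.influence))
    .slice(0, n);
}
function volatileFeatures(exp: PredictionExplanation, threshold = 0.5): string[] {
  return exp.sensitivity_analysis
    .filter((s) => s.sensitivity_score > threshold)
    .map((s) => s.feature);
}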
export interface ReasoningPath {
  id: string;
  timestamp: Date;
  query: string;
  steps: ProcessedStep[];
  logical_flow: LogicalFlow;
  assumptions: Assumption[];
  evidence_quality: EvidenceQuality;
  gaps_and_limitations: Gap[];
  alternative_interpretations: AlternativeInterpretation[];
}
export interface ProcessedStep {
  step_number: number;
  description: string;
  logic_type: string;
  evidence_used: any[];
  assumptions: string[];
  confidence: number;
}
export interface LogicalFlow {
  is_coherent: boolean;
  logical_gaps: Array<{
    between_steps: [number, number];
    issue: string;
    severity: number;
  }>;
  reasoning_type: 'deductive' | 'inductive' | 'abductive';
  flow_quality: number;
}
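// Sketch: pick out a LogicalFlow's worst gap. Treating `severity` as a score
// where higher is worse is our reading of the field name, not documented.
function worstGap(flow: LogicalFlow): LogicalFlow['logical_gaps'][number] | undefined {
  return [...flow.logical_gaps].sort((a, b) => b.severity - a.severity)[0];
}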
export interface Assumption {
  assumption: string;
  step: string;
  validity: number;
  impact: number;
}
export interface EvidenceQuality {
  average_quality: number;
  evidence_count: number;
  quality_distribution: any;
}
export interface Gap {
  type: 'logical_gap' | 'missing_evidence';
  location: string;
  description: string;
  severity: 'low' | 'medium' | 'high';
}
export interface AlternativeInterpretation {
  framework: string;
  reasoning: string;
  conclusion: string;
  confidence: number;
}
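// Sketch: summarize a ReasoningPath's weak points by pairing high-severity
// gaps with shaky assumptions. Reading validity/impact as 0..1 scores and the
// 0.5 cutoffs are illustrative assumptions.
function weakPoints(path: ReasoningPath): { gaps: Gap[]; shakyAssumptions: Assumption[] } {
  return {
    gaps: path.gaps_and_limitations.filter((g) => g.severity === 'high'),
    shakyAssumptions: path.assumptions.filter((a) => a.validity < 0.5 && a.impact > 0.5),
  };
}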
export interface ActionSequenceExplanation {
  id: string;
  timestamp: Date;
  actions: any[];
  context: any;
  outcomes: any[];
  action_analysis: any;
  decision_points: any[];
  causal_relationships: any[];
  optimization_suggestions: string[];
  counterfactual_analysis: any;
}
export interface Insights {
  timestamp: Date;
  domain: string;
  timeframe: string;
  total_decisions_analyzed: number;
  patterns: Pattern[];
  trends: Trend[];
  anomalies: Anomaly[];
  recommendations: Recommendation[];
  confidence_metrics: ConfidenceMetrics;
  key_insights: string[];
}
export interface Pattern {
  type: string;
  description: string;
  confidence: number;
}
export interface Trend {
  type: string;
  description: string;
  magnitude: number;
}
export interface Anomaly {
  decision_id: string;
  type: string;
  severity: string;
  description: string;
}
export interface Recommendation {
  type: string;
  priority: 'low' | 'medium' | 'high';
  description: string;
  expected_impact: string;
}
export interface ConfidenceMetrics {
  sample_size_adequacy: number;
  data_quality: number;
  temporal_coverage: number;
}
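// Sketch: pull actionable output from an Insights report, trusting it only
// when the confidence metrics clear a bar. The 0.6 thresholds are made up for
// illustration; the declarations give no guidance on acceptable values.
function urgentActions(insights: Insights): Recommendation[] {
  const m = insights.confidence_metrics;
  const trusted = m.sample_size_adequacy > 0.6 && m.data_quality > 0.6;
  return trusted ? insights.recommendations.filter((r) => r.priority === 'high') : [];
}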
export declare class ExplainableAI {
  reasoningTraces: Map<string, ReasoningTrace>;
  decisionHistory: Array<{
    id: string;
    timestamp: Date;
    decision: any;
    confidence: number;
  }>;
  explanationTemplates: Map<string, any>;
  insightCache: Map<string, any>;
  explanationsPath: string;
  constructor();
  explainDecision(decision: any, context: any, evidence?: any): Promise<ReasoningTrace>;
  explainPrediction(prediction: any, inputData: any, model_info?: any): Promise<PredictionExplanation>;
  generateReasoningPath(query: string, steps?: any[]): Promise<ReasoningPath>;
  explainActionSequence(actions: any[], context: any, outcomes?: any[]): Promise<ActionSequenceExplanation>;
  generateInsights(domain?: string, timeframe?: string): Promise<Insights>;
  generateExplanation(decision: any, context: any, evidence: any): Promise<Explanation>;
  generateExplanationSummary(decision: any, context: any, evidence: any): Promise<string>;
  generateDetailedReasoning(decision: any, context: any, evidence: any): Promise<DetailedReasoning[]>;
  formatSupportingEvidence(evidence: any): EvidenceItem[];
  explainConfidence(decision: any, evidence: any): ConfidenceFactor[];
  buildReasoningChain(decision: any, context: any, evidence: any): ReasoningStep[];
  analyzeAlternatives(decision: any, context: any, evidence: any): Promise<AlternativePath[]>;
  analyzeFeatureImportance(inputData: any, prediction: any, model_info: any): Promise<FeatureImportance[]>;
  identifyConfidenceFactors(prediction: any, inputData: any): ConfidenceFactor[];
  performSensitivityAnalysis(inputData: any, prediction: any, model_info: any): Promise<SensitivityAnalysis[]>;
  findSimilarExamples(inputData: any): SimilarExample[];
  generateHumanReadableExplanation(prediction: any, inputData: any, model_info: any): Promise<string>;
  initializeTemplates(): void;
  normalizeContext(context: any): any;
  normalizeInputData(inputData: any): any;
  calculateExplanationConfidence(decision: any, evidence: any): number;
  loadExplanationData(): Promise<void>;
  saveExplanationData(): Promise<void>;
}
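// End-to-end sketch of the declared API. The declarations show call shapes
// only; runtime behavior (what gets persisted, how confidence is computed)
// and the '7d' timeframe format are assumptions.
async function demo(): Promise<void> {
  const xai = new ExplainableAI();
  await xai.loadExplanationData();
  const trace = await xai.explainDecision(
    { action: 'choose_route', route: 'A' }, // decision
    { domain: 'routing' },                  // context
    { eta_minutes: 12 },                    // optional evidence
  );
  console.log(trace.explanation?.summary, trace.confidence);
  const insights = await xai.generateInsights('routing', '7d');
  console.log(insights.key_insights);
  await xai.saveExplanationData();
}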