llmverify
AI Output Verification Toolkit — Local-first LLM safety, hallucination detection, PII redaction, prompt injection defense, and runtime monitoring. Zero telemetry. OWASP LLM Top 10 aligned.
/**
* Health Score Engine
*
* Aggregates engine results into a composite health score.
* Provides actionable health status and recommendations.
*
* WHAT THIS DOES:
* ✅ Combines multiple engine results
* ✅ Calculates weighted health score
* ✅ Provides actionable status levels
* ✅ Generates recommendations
*
* WHAT THIS DOES NOT DO:
* ❌ Predict future health
* ❌ Identify root causes
* ❌ Guarantee accuracy of individual engines
*
* @module engines/runtime/health-score
* @author Haiec
* @license MIT
*/
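/*
* Conceptual sketch of the aggregation (illustration only, not the actual
* implementation): each engine result contributes a normalized metric, and
* the composite score is a weighted average,
*
*   score = sum(weight_i * metric_i) / sum(weight_i)
*
* mapped afterwards to a status level such as 'unstable'. Weight keys and
* thresholds here are assumptions, not part of the published API.
*/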
import { EngineResult, HealthReport } from '../../types/runtime';
/**
* Aggregates engine results into a health report.
*
* @param results - Array of engine results to aggregate
* @param weights - Optional custom weights for each metric
* @returns Comprehensive health report
*
* @example
* const results = [latencyResult, tokenRateResult, fingerprintResult];
* const report = HealthScoreEngine(results);
*
* if (report.health === 'unstable') {
*   alert('LLM health critical!');
* }
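*
* @example
* // With custom weights. The keys below ('latency', 'tokenRate',
* // 'fingerprint') are illustrative assumptions; actual keys depend on
* // which engines produced the results.
* const weighted = HealthScoreEngine(results, {
*   latency: 0.5,
*   tokenRate: 0.3,
*   fingerprint: 0.2,
* });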
*/
export declare function HealthScoreEngine(results: EngineResult[], weights?: Record<string, number>): HealthReport;
/**
* Quick health check: returns true when the aggregated report indicates a healthy status.
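*
* @example
* // Minimal guard sketch; assumes `results` as in the HealthScoreEngine
* // example above.
* const report = HealthScoreEngine(results);
* if (!isHealthy(report)) {
*   // fall back to a secondary model, retry, or surface `report` to monitoring
* }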
*/
export declare function isHealthy(report: HealthReport): boolean;
/**
* Maps a health report to a severity level for alerting.
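*
* @example
* // Illustrative routing of levels to console severities; this mapping is
* // an assumption, not part of the library.
* const report = HealthScoreEngine(results);
* switch (getAlertLevel(report)) {
*   case 'critical': console.error('LLM health critical', report); break;
*   case 'warning': console.warn('LLM health degraded', report); break;
*   case 'info': console.info('LLM health notice', report); break;
*   case 'none': break;
* }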
*/
export declare function getAlertLevel(report: HealthReport): 'none' | 'info' | 'warning' | 'critical';