llmverify
Version:
AI Output Verification Toolkit — Local-first LLM safety, hallucination detection, PII redaction, prompt injection defense, and runtime monitoring. Zero telemetry. OWASP LLM Top 10 aligned.
24 lines (23 loc) • 644 B
TypeScript
/**
* Risk Scoring Engine
*
* Calculates overall risk score with honest interpretation.
*
* @module engines/risk-scoring
* @author Haiec
* @license MIT
*/
import { Config } from '../../types/config';
import { VerifyResult, RiskScore } from '../../types/results';
/**
 * Declaration of the engine that aggregates verification findings into a
 * single {@link RiskScore}.
 *
 * Public surface is the constructor and {@link RiskScoringEngine.calculate};
 * everything else is a private implementation detail surfaced here only so
 * this .d.ts matches the compiled implementation.
 */
export declare class RiskScoringEngine {
/** Configuration captured at construction; type erased in this declaration. */
private _config;
/** @param _config - Toolkit configuration used to parameterize scoring. */
constructor(_config: Config);
/**
 * Computes the overall risk score for a verification result.
 *
 * Accepts `Partial<VerifyResult>`, so callers may score an incomplete
 * result (e.g. before every check has run).
 *
 * @param result - Verification findings gathered so far (may be partial).
 * @returns The aggregated {@link RiskScore}.
 */
calculate(result: Partial<VerifyResult>): RiskScore;
// NOTE(review): the private members below are body-less declarations; their
// names suggest roles (check weighting, consistency risk, level/blocker/
// action/confidence derivation, human-readable interpretation) but the
// actual semantics live in the implementation file — confirm there.
private getWeights;
private calculateConsistencyRisk;
private determineLevel;
private identifyBlockers;
private determineAction;
private calculateConfidence;
private generateInterpretation;
}