llmverify
AI Output Verification Toolkit — Local-first LLM safety, hallucination detection, PII redaction, prompt injection defense, and runtime monitoring. Zero telemetry. OWASP LLM Top 10 aligned.
/**
* Consistency Engine
*
* Checks for internal consistency, contradictions, and behavioral drift.
* All processing is local - no external API calls.
*
* @module engines/consistency
* @author Haiec
* @license MIT
*/
import { Config } from '../../types/config';
import { ConsistencyResult } from '../../types/results';
export declare class ConsistencyEngine {
private config;
private readonly LIMITATIONS;
private readonly METHODOLOGY;
constructor(config: Config);
/**
* Run all consistency checks on the given content
*/
check(content: string): Promise<ConsistencyResult>;
/**
* Detect sentiment drift across sections
*/
private detectSentimentDrift;
/**
* Calculate sentiment score for a section (-1 to 1)
*/
private calculateSentiment;
/**
* Detect style drift (formal to informal or vice versa)
*/
private detectStyleDrift;
/**
* Calculate style score (-1 = informal, 1 = formal)
*/
private calculateStyleScore;
/**
* Detect significant length drift
*/
private detectLengthDrift;
/**
* Detect numerical inconsistencies
*/
private detectNumericalInconsistencies;
/**
* Create a minimal ConsistencyResult
*/
private createMinimalResult;
/**
* Compute pairwise similarity scores between sections
*/
private calculateSimilarityMatrix;
/**
* Average the pairwise section similarities
*/
private calculateAverageSimilarity;
/**
* Detect contradictory statements within the content
*/
private detectContradictions;
/**
* Extract the claim text relevant to a detected issue
*/
private extractRelevantClaim;
/**
* Compute the overall confidence of the consistency result
*/
private calculateConfidence;
}
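
Below is a minimal usage sketch based only on the signatures declared above. The import path and the shape of Config are assumptions for illustration; this file does not show how the package exports the engine or what Config contains.

// Usage sketch (assumptions: the 'llmverify' import path and the empty Config
// placeholder; only the constructor and check() signatures are guaranteed above).
import { ConsistencyEngine, Config } from 'llmverify';

async function verifyOutput(output: string) {
  const config = {} as Config; // placeholder; real Config fields live in types/config
  const engine = new ConsistencyEngine(config);

  // check() runs the local drift and contradiction analysis and resolves
  // with a ConsistencyResult; no external API calls are made.
  const result = await engine.check(output);
  console.log(result);
}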
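
For orientation, here is one way a style score in [-1, 1] (the range documented for calculateStyleScore above) could be derived from surface markers. This is an illustration only, not llmverify's implementation; the marker lists and the normalization are invented for the example.

// Illustrative heuristic only: -1 = informal, 1 = formal. Marker lists and
// weighting are invented for this sketch and do not reflect llmverify's internals.
const FORMAL_MARKERS = [/\btherefore\b/gi, /\bfurthermore\b/gi, /\bmoreover\b/gi];
const INFORMAL_MARKERS = [/\bgonna\b/gi, /\bkinda\b/gi, /\blol\b/gi, /!{2,}/g];

function styleScore(section: string): number {
  // Count how many times any pattern in the list matches the section.
  const count = (patterns: RegExp[]) =>
    patterns.reduce((sum, re) => sum + (section.match(re)?.length ?? 0), 0);

  const formal = count(FORMAL_MARKERS);
  const informal = count(INFORMAL_MARKERS);
  const total = formal + informal;

  // No markers at all: treat the section as stylistically neutral.
  if (total === 0) return 0;

  // Normalize the difference into [-1, 1].
  return (formal - informal) / total;
}

// Style drift detection could then compare consecutive sections and flag
// large jumps, e.g. Math.abs(styleScore(a) - styleScore(b)) > 0.5.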