llmverify
Version: (not specified in this excerpt)
AI Output Verification Toolkit — Local-first LLM safety, hallucination detection, PII redaction, prompt injection defense, and runtime monitoring. Zero telemetry. OWASP LLM Top 10 aligned.
44 lines (43 loc) • 1.41 kB
TypeScript
/**
* Fingerprint Engine
*
* Detects behavioral drift by analyzing response structure patterns.
* Uses entropy, sentence structure, and length patterns to identify changes.
*
* WHAT THIS DOES:
* ✅ Calculates response fingerprint (tokens, sentences, entropy)
* ✅ Compares to baseline fingerprint
* ✅ Detects structural drift in responses
*
* WHAT THIS DOES NOT DO:
* ❌ Analyze semantic content
* ❌ Detect quality changes
* ❌ Identify specific model changes
*
* @module engines/runtime/fingerprint
* @author Haiec
* @license MIT
*/
import { CallRecord, EngineResult, ResponseFingerprint } from '../../types/runtime';
/**
 * Extracts a structural fingerprint from response text.
 *
 * Per the module header, the fingerprint captures token, sentence, and
 * entropy characteristics — purely structural signals, not semantic content.
 *
 * @param text - Raw response text to fingerprint
 * @returns The computed `ResponseFingerprint` (field details are declared in
 *          `types/runtime` — see that module for the exact shape)
 */
declare function extractFingerprint(text: string): ResponseFingerprint;
/**
 * Analyzes a response fingerprint for behavioral (structural) drift by
 * comparing the call's response against a baseline fingerprint.
 *
 * @param call - The call record whose response is analyzed
 * @param baselineFingerprint - Baseline fingerprint for comparison. The
 *        signature also accepts an empty object (`Record<string, never>`),
 *        i.e. when no baseline has been established yet — presumably the
 *        engine then skips or relaxes the comparison; confirm in the
 *        implementation.
 * @returns Engine result with fingerprint analysis; the example below shows
 *          the result carries a `status` field (e.g. 'warn' on drift)
 *
 * @example
 * const result = FingerprintEngine(callRecord, baseline.fingerprint);
 * if (result.status === 'warn') {
 *   console.log('Response structure has changed');
 * }
 */
export declare function FingerprintEngine(call: CallRecord, baselineFingerprint: ResponseFingerprint | Record<string, never>): EngineResult;
/**
 * Re-exports `extractFingerprint` so consumers can compute fingerprints
 * directly without invoking the full engine (e.g. to construct a baseline —
 * NOTE(review): presumed use case; confirm against callers).
 */
export { extractFingerprint };