llmverify
AI Output Verification Toolkit — Local-first LLM safety, hallucination detection, PII redaction, prompt injection defense, and runtime monitoring. Zero telemetry. OWASP LLM Top 10 aligned.
/**
* Reasoning Compression Score Module
*
 * Detects whether LLM output shows compressed or shallow reasoning
 * relative to the complexity of the prompt.
*
* @module engines/classification/compression
* @author Haiec
* @license MIT
*/
import { CompressionMetrics, ReasoningLabel } from './types';
/** Default expected minimum word counts per complexity level (relaxed for level 1) */
export declare const DEFAULT_EXPECTED_WORDS: {
level1: number;
level2: number;
level3: number;
};
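// Illustrative sketch: the concrete default numbers ship with the compiled
// module and are not reproduced here; a caller can read them directly.
//
//   const level2Minimum = DEFAULT_EXPECTED_WORDS.level2;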
/**
 * Gets the expected minimum word count for a complexity level.
 *
 * @param level - Complexity level (1-3)
 * @param overrides - Optional word count overrides
 * @returns Expected minimum word count
 */
export declare function getExpectedWords(level: 1 | 2 | 3, overrides?: {
level1?: number;
level2?: number;
level3?: number;
}): number;
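// Usage sketch (the override value below is an arbitrary example, not a
// library default):
//
//   const expected = getExpectedWords(2);                 // built-in default for level 2
//   const relaxed  = getExpectedWords(2, { level2: 40 }); // per-call override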
/**
* Calculates reasoning compression metrics.
*
* @param prompt - The original prompt
* @param output - The LLM output
* @param baselineEntropy - Optional baseline entropy for comparison
* @param expectedWordsOverrides - Optional word count overrides per level
* @returns Compression metrics
*/
export declare function calculateCompressionMetrics(prompt: string, output: string, baselineEntropy?: number, expectedWordsOverrides?: {
level1?: number;
level2?: number;
level3?: number;
}): CompressionMetrics;
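// Usage sketch (the import path and the prompt/output strings are
// illustrative assumptions, not taken from the library's docs):
//
//   import { calculateCompressionMetrics } from 'llmverify';
//   const metrics = calculateCompressionMetrics(
//     'Compare optimistic and pessimistic locking and explain when to use each.',
//     'Optimistic locking checks for conflicts at commit time; pessimistic locking takes locks up front.',
//   );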
/**
* Calculates overall reasoning compression score.
*
* @param metrics - Compression metrics
* @returns Compression score (0-1)
*/
export declare function calculateCompressionScore(metrics: CompressionMetrics): number;
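// Usage sketch, continuing from the metrics computed above:
//
//   const score = calculateCompressionScore(metrics); // value in the 0-1 range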
/**
 * Gets the reasoning compression label for a score.
 *
 * @param score - Compression score (0-1)
 * @returns Reasoning label
 */
export declare function getReasoningLabel(score: number): ReasoningLabel;
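// Usage sketch, mapping the score from above to a coarse label:
//
//   const label: ReasoningLabel = getReasoningLabel(score);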