llmverify
Version:
AI Output Verification Toolkit — Local-first LLM safety, hallucination detection, PII redaction, prompt injection defense, and runtime monitoring. Zero telemetry. OWASP LLM Top 10 aligned.
29 lines (28 loc) • 909 B
TypeScript
/**
* Instruction-Following Evaluation Module
*
* Evaluates whether LLM output follows specified instruction rules.
*
* @module engines/classification/instruction-eval
* @author Haiec
* @license MIT
*/
import { InstructionRule, RuleResult } from './types';
/**
 * Result of instruction evaluation.
 *
 * Aggregates per-rule outcomes with an overall compliance summary.
 */
export interface InstructionEvalResult {
/** Outcome for each evaluated rule — presumably one entry per input rule, in input order; confirm in implementation. */
ruleResults: RuleResult[];
/** Fraction of rules that were satisfied — presumably in [0, 1]; verify against the implementation. */
complianceRatio: number;
/** Overall verdict: true when the output is considered to follow the instructions — NOTE(review): threshold/criteria not visible here, confirm. */
instructionFollowed: boolean;
}
/**
 * Evaluates all instruction rules against output.
 *
 * Declaration only — implementation lives in the corresponding `.ts`/`.js`
 * module. Returns per-rule results plus an aggregate compliance summary
 * (see {@link InstructionEvalResult}).
 *
 * @param text - The output text to evaluate
 * @param normalizedJson - Parsed JSON value of the output if available;
 *   pass `undefined` when the output was not (or could not be) parsed
 * @param rules - Instruction rules to evaluate
 * @param isJson - Whether output is valid JSON — presumably gates
 *   JSON-structure rules; confirm in the implementation
 * @returns Evaluation result with compliance ratio
 */
export declare function evaluateInstructionRules(text: string, normalizedJson: unknown | undefined, rules: InstructionRule[], isJson: boolean): InstructionEvalResult;