llmverify
AI Output Verification Toolkit: local-first LLM safety, hallucination detection, PII redaction, prompt injection defense, and runtime monitoring. Zero telemetry. Aligned with the OWASP LLM Top 10.
/**
 * Classification Engine Implementation
 *
 * Main engine that orchestrates all classification modules.
 *
 * @module engines/classification/engine
 * @author Haiec
 * @license MIT
 */
import { ClassificationResult, ClassificationPolicy } from './types';
/**
 * Classification Engine class.
 *
 * @example
 * const engine = new ClassificationEngine();
 * const result = engine.classify(prompt, output);
 *
 * console.log(result.intent); // 'summary'
 * console.log(result.hallucinationRisk); // 0.3
 * console.log(result.tags); // ['intent:summary', 'hallucination:low']
 */
export declare class ClassificationEngine {
    private policy;
    constructor(policy?: ClassificationPolicy);
    /**
     * Classifies LLM output.
     *
     * @param prompt - The original prompt
     * @param output - The LLM output to classify
     * @returns Complete classification result
     */
    classify(prompt: string, output: string): ClassificationResult;
    /**
     * Updates the classification policy.
     *
     * @param policy - The policy applied to subsequent classify() calls
     */
    setPolicy(policy: ClassificationPolicy): void;
}
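/*
 * Usage sketch (illustrative; not part of the published declarations). It
 * assumes the package is imported under its name, 'llmverify', and uses only
 * the policy shape and result fields shown in the examples in this file; the
 * 0.5 risk threshold is an arbitrary choice, not a documented default.
 *
 *   import { ClassificationEngine } from 'llmverify';
 *
 *   const engine = new ClassificationEngine({
 *     instructionRules: [{ id: 'format', type: 'format', params: { expect: 'paragraph' } }],
 *   });
 *
 *   const result = engine.classify('Summarize this article', 'The article discusses...');
 *   if (result.hallucinationRisk > 0.5) {
 *     // e.g. hold the output for human review instead of returning it
 *   }
 *
 *   // Swap in a stricter or looser policy at runtime without re-creating the engine.
 *   engine.setPolicy({ instructionRules: [] });
 */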
/**
 * Convenience function for one-off classification.
 *
 * @param prompt - The original prompt
 * @param output - The LLM output to classify
 * @param policy - Optional classification policy
 * @returns Classification result
 *
 * @example
 * const result = classify(
 *   'Summarize this article',
 *   'The article discusses...',
 *   { instructionRules: [{ id: 'format', type: 'format', params: { expect: 'paragraph' } }] }
 * );
 */
export declare function classify(prompt: string, output: string, policy?: ClassificationPolicy): ClassificationResult;
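
For reference, an end-to-end sketch of the one-off classify helper. It assumes the import path matches the package name ('llmverify') and relies only on the result fields documented above (intent, hallucinationRisk, tags); the 'hallucination:high' tag is inferred from the 'hallucination:low' example and may not match the library's actual tag vocabulary.

import { classify } from 'llmverify';

const result = classify(
  'Summarize this article',
  'The article discusses...',
  { instructionRules: [{ id: 'format', type: 'format', params: { expect: 'paragraph' } }] }
);

// Gate downstream use on the classification; the threshold is an arbitrary example.
if (result.hallucinationRisk > 0.5 || result.tags.includes('hallucination:high')) {
  console.warn(`Flagging ${result.intent} output for review`, result.tags);
}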