llmverify
Version:
AI Output Verification Toolkit — Local-first LLM safety, hallucination detection, PII redaction, prompt injection defense, and runtime monitoring. Zero telemetry. OWASP LLM Top 10 aligned.
42 lines (41 loc) • 1.09 kB
TypeScript
/**
* llmverify - Main Verification Function
*
* AI Output Verification with honest limitations.
* Local-first, privacy-preserving, transparent.
*
* @module verify
* @author Haiec
* @license MIT
*/
import { Config } from './types/config';
import { VerifyResult } from './types/results';
/**
 * Options accepted by {@link verify}.
 */
export interface VerifyOptions {
  /** The AI-generated output to verify. */
  content: string;
  /** Partial configuration overrides; unspecified fields presumably fall back to defaults — TODO confirm merge semantics in the implementation. */
  config?: Partial<Config>;
  /** Optional hints about the content and how it should be checked. */
  context?: {
    /** Hint that `content` is (or should be) JSON — NOTE(review): exact effect on verification not visible here; confirm against the implementation. */
    isJSON?: boolean;
    /** Schema the content is expected to conform to; typed `unknown`, so the accepted schema format is implementation-defined. */
    expectedSchema?: unknown;
    /** Names of verification engines to skip — presumably engine identifiers; verify valid values against the engine registry. */
    skipEngines?: string[];
  };
}
/**
 * Main verification function.
 *
 * Verifies AI-generated `content` and resolves with a {@link VerifyResult}
 * that reports both findings and the known limitations of the analysis
 * (see `result.limitations` in the example below).
 *
 * PRIVACY GUARANTEE: Free tier never makes network requests.
 * All processing is local unless explicit API key is provided.
 *
 * @param options - Verification options
 * @returns Complete verification result with limitations
 *
 * @example
 * ```typescript
 * const result = await verify({
 *   content: "The Earth is flat."
 * });
 *
 * console.log(result.risk.level); // "moderate"
 * console.log(result.limitations); // ["Pattern-based detection only", ...]
 * ```
 */
export declare function verify(options: VerifyOptions): Promise<VerifyResult>;