llmverify
AI Output Verification Toolkit — Local-first LLM safety, hallucination detection, PII redaction, prompt injection defense, and runtime monitoring. Zero telemetry. OWASP LLM Top 10 aligned.
/**
* llmverify Error Classes
*
* Custom error types for clear error handling.
*
* @module errors
* @author Haiec
* @license MIT
*/
import { ErrorCode, ErrorMetadata } from './errors/codes';
/**
* Base error class for llmverify
*/
export declare class LLMVerifyError extends Error {
    readonly code?: ErrorCode;
    readonly metadata?: ErrorMetadata;
    readonly requestId?: string;
    constructor(message: string, code?: ErrorCode, details?: any, requestId?: string);
    toJSON(): {
        name: string;
        message: string;
        code: ErrorCode | undefined;
        metadata: ErrorMetadata | undefined;
        stack: string | undefined;
    };
}
/**
* Privacy violation error
*/
export declare class PrivacyViolationError extends LLMVerifyError {
    constructor(message: string, details?: any, requestId?: string);
}
/**
* Validation error
*/
export declare class ValidationError extends LLMVerifyError {
    constructor(message: string, code?: ErrorCode, details?: any, requestId?: string);
}
/**
* Verification error
*/
export declare class VerificationError extends LLMVerifyError {
    constructor(message: string, code?: ErrorCode, details?: any, requestId?: string);
}
/**
* Configuration error
*/
export declare class ConfigurationError extends LLMVerifyError {
    constructor(message: string, code?: ErrorCode, details?: any, requestId?: string);
}
/**
* Engine error
*/
export declare class EngineError extends LLMVerifyError {
    readonly engineName: string;
    constructor(engineName: string, message: string, details?: any, requestId?: string);
}
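The classes form a small hierarchy rooted at `LLMVerifyError`, so callers can catch broadly or narrow the failure with `instanceof`. Below is a minimal sketch of that pattern, not part of the published file: the import from the package root is an assumption (the declarations above sit in the package's compiled output and may be re-exported elsewhere), and `runSafely` / `verify` are hypothetical names used only for illustration.

```typescript
// Minimal sketch: branching on llmverify error subtypes.
// Assumption: the classes are re-exported from the package root; adjust the
// import path if they live under a subpath instead.
import { LLMVerifyError, PrivacyViolationError, EngineError } from 'llmverify';

// Wrap any verification call and inspect the error subtype on failure.
async function runSafely(verify: () => Promise<void>): Promise<void> {
  try {
    await verify();
  } catch (err) {
    if (err instanceof PrivacyViolationError) {
      // A privacy rule (e.g. PII exposure) was violated; requestId ties the failure to a run.
      console.error('privacy violation', err.requestId, err.message);
    } else if (err instanceof EngineError) {
      // A single verification engine failed; engineName identifies which one.
      console.error(`engine "${err.engineName}" failed:`, err.message);
    } else if (err instanceof LLMVerifyError) {
      // Any other toolkit error; toJSON() yields a structured, log-friendly record.
      console.error(JSON.stringify(err.toJSON()));
    } else {
      throw err; // Not a llmverify error: rethrow unchanged.
    }
  }
}
```

Checking the most specific subclasses first keeps the generic `LLMVerifyError` branch as a catch-all, and `toJSON()` gives a serializable shape (name, message, code, metadata, stack) suitable for structured logging.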