llmverify
Version:
AI Output Verification Toolkit — Local-first LLM safety, hallucination detection, PII redaction, prompt injection defense, and runtime monitoring. Zero telemetry. OWASP LLM Top 10 aligned.
40 lines (39 loc) • 1.19 kB
TypeScript
/**
* Prompt Injection Detection & Deterrence
*
* Comprehensive detection based on OWASP LLM-01.
* Includes detection, sanitization, and deterrence utilities.
*
* @module csm6/security/prompt-injection
* @author Haiec
* @license MIT
*/
import { Finding } from '../../types/results';
/**
* Check for prompt injection attempts
* @param input - The input text to check
* @returns Array of findings
*/
export declare function checkPromptInjection(input: string): Finding[];
/**
* Sanitize input by removing or neutralizing injection attempts
* @param input - The input to sanitize
* @returns Sanitized input and list of removed patterns
*/
export declare function sanitizePromptInjection(input: string): {
sanitized: string;
removed: string[];
wasModified: boolean;
};
/**
* Get risk score for input (0-1)
* @param input - The input to score
* @returns Risk score between 0 and 1
*/
export declare function getInjectionRiskScore(input: string): number;
/**
* Quick check if input is likely safe (no injection detected)
* @param input - The input to check
* @returns true if no injection detected
*/
export declare function isInputSafe(input: string): boolean;