llmverify
Version:
AI Output Verification Toolkit — Local-first LLM safety, hallucination detection, PII redaction, prompt injection defense, and runtime monitoring. Zero telemetry. OWASP LLM Top 10 aligned.
16 lines (15 loc) • 375 B
TypeScript
/**
* Harmful Content Detection
*
* Detects potentially harmful content patterns.
* Pattern-based detection with honest limitations.
*
* @module csm6/security/harmful-content
* @author Haiec
* @license MIT
*/
import { Finding } from '../../types/results';
/**
 * Check for harmful content.
 *
 * Scans the given text for potentially harmful content patterns and
 * returns the resulting findings. Per the module header, detection is
 * pattern-based (heuristic), so false positives and false negatives
 * are possible — callers should treat results as advisory, not
 * authoritative.
 *
 * @param content - The raw text to scan (e.g. an LLM output).
 * @returns An array of {@link Finding} results describing detected
 *   patterns; presumably empty when nothing is flagged — implementation
 *   not visible in this declaration file, confirm against the source.
 */
export declare function checkHarmfulContent(content: string): Finding[];