@llm-dev-ops/shield-sdk
Version: (unspecified)
Enterprise-grade SDK for securing Large Language Model applications
30 lines • 923 B
TypeScript
import { BaseScanner } from './base.js';
import type { ScanResult } from '../types.js';
/**
 * Configuration options for {@link ToxicityScanner}.
 *
 * All fields are optional; omitted fields fall back to the scanner's
 * built-in defaults (defined in the implementation, not visible in this
 * declaration file).
 */
export interface ToxicityConfig {
  /** Toxicity categories to detect — presumably defaults to all categories; confirm in implementation. */
  categories?: ToxicityCategory[];
  /** Sensitivity level in the range 0.0 – 1.0 (direction — higher = stricter — TODO confirm in implementation). */
  sensitivity?: number;
  /** Custom keywords to detect in addition to the built-in patterns. */
  customKeywords?: string[];
  /** Keywords to allow (whitelist), i.e. exempt from detection. */
  allowedKeywords?: string[];
}
/**
 * Toxicity categories the scanner can detect.
 *
 * Exported (previously module-private) so that consumers can type their own
 * category lists when building a {@link ToxicityConfig} — the interface's
 * `categories` field already references this type publicly, so keeping the
 * alias unexported made it impossible for callers to name. Adding `export`
 * is backward-compatible.
 */
export type ToxicityCategory = 'violence' | 'hate' | 'harassment' | 'self-harm' | 'sexual' | 'profanity';
/**
 * Scanner for detecting toxic content in text (categories: violence, hate,
 * harassment, self-harm, sexual, profanity — see `ToxicityCategory`).
 *
 * Extends {@link BaseScanner}; scanning is asynchronous and resolves to a
 * {@link ScanResult}.
 */
export declare class ToxicityScanner extends BaseScanner {
  /** Stable scanner identifier, reported as "toxicity". */
  readonly name = "toxicity";
  /** Detection patterns — presumably compiled from categories/customKeywords; confirm in implementation. */
  private patterns;
  /** Effective sensitivity threshold, taken from {@link ToxicityConfig.sensitivity}. */
  private sensitivity;
  /** Whitelisted keywords excluded from detection, from {@link ToxicityConfig.allowedKeywords}. */
  private allowedKeywords;
  /**
   * @param config - Optional configuration; omitted fields use the
   *   implementation's built-in defaults.
   */
  constructor(config?: ToxicityConfig);
  /**
   * Scans the given text for toxic content.
   *
   * @param text - Input text to analyze.
   * @returns A promise resolving to the scan result.
   */
  scan(text: string): Promise<ScanResult>;
  /** Private helper — name suggests it escapes regex metacharacters in keywords; confirm in implementation. */
  private escapeRegex;
}
export {};
//# sourceMappingURL=toxicity.d.ts.map