@dooor-ai/toolkit

Guards, Evals & Observability for AI applications. Works seamlessly with LangChain/LangGraph.

toxicity.d.ts
import { Guard } from "./base";
import { GuardResult, GuardConfig } from "../core/types";
export interface ToxicityGuardConfig extends GuardConfig {
    /**
     * AI Provider name to use (configured in CortexDB Studio, e.g., "gemini")
     * If not provided, uses providerName from toolkitConfig
     */
    providerName?: string;
    /** Categories to check: hate, violence, sexual, harassment */
    categories?: string[];
}
/**
 * Guard that detects toxic content using AI (Gemini via CortexDB proxy)
 *
 * @example
 * ```typescript
 * // Provider from toolkitConfig (recommended)
 * const guard = new ToxicityGuard({
 *   threshold: 0.7,
 *   categories: ["hate", "violence", "harassment"]
 * });
 *
 * // Or specify provider explicitly
 * const guard = new ToxicityGuard({
 *   threshold: 0.7,
 *   providerName: "gemini", // Override toolkitConfig.providerName
 * });
 * ```
 */
export declare class ToxicityGuard extends Guard {
    private providerName?;
    private categories;
    constructor(config?: ToxicityGuardConfig);
    get name(): string;
    validate(input: string, metadata?: Record<string, any>): Promise<GuardResult>;
    /**
     * Build prompt for toxicity detection
     */
    private buildToxicityPrompt;
    /**
     * Parse AI response for toxicity score and categories
     */
    private parseToxicityResponse;
}
//# sourceMappingURL=toxicity.d.ts.map
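
For orientation, here is a minimal sketch of how this declaration might be consumed. It assumes ToxicityGuard is re-exported from the package root (@dooor-ai/toolkit) and that toolkit-level provider configuration (e.g. toolkitConfig.providerName) has been set up elsewhere; the exact shape of GuardResult lives in ../core/types and is not shown here, so the sketch only logs the result rather than reading specific fields.

```typescript
import { ToxicityGuard } from "@dooor-ai/toolkit"; // assumed re-export from the package root

async function checkMessage(message: string): Promise<void> {
  // Construct the guard as in the JSDoc @example above;
  // threshold and categories come from ToxicityGuardConfig / GuardConfig.
  const guard = new ToxicityGuard({
    threshold: 0.7,
    categories: ["hate", "violence", "harassment"],
  });

  // validate() resolves to a GuardResult (defined in ../core/types).
  // The metadata object is optional and arbitrary; "source" is a hypothetical key.
  const result = await guard.validate(message, { source: "chat" });

  // GuardResult's fields are not visible in this .d.ts, so just log it.
  console.log(`${guard.name} result:`, result);
}

checkMessage("example user input").catch(console.error);
```

In a LangChain/LangGraph pipeline, a guard like this would typically run on user input before the model call, with the GuardResult deciding whether the request proceeds.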