@dooor-ai/toolkit
Guards, Evals & Observability for AI applications; works seamlessly with LangChain/LangGraph
import { Guard } from "./base";
import { GuardResult, GuardConfig } from "../core/types";
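/**
 * Configuration for {@link PIIGuard}. Extends the shared GuardConfig
 * (which supplies common options such as threshold, per the examples below)
 * with PII-specific settings.
 */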
export interface PIIGuardConfig extends GuardConfig {
    /**
     * Name of the AI provider to use (configured in CortexDB Studio, e.g., "gemini").
     * If not provided, falls back to the providerName from toolkitConfig.
     */
    providerName?: string;
    /** Types of PII to detect: email, phone, ssn, credit_card, cpf, address, passport, etc. */
    detectTypes?: string[];
    /** Action to take when PII is detected: block, mask, or warn */
    action?: "block" | "mask" | "warn";
}
/**
 * Guard that detects Personally Identifiable Information (PII) using AI
 * (LLM-as-a-Judge).
 *
 * Detects various types of PII, including:
 * - Email addresses and phone numbers
 * - Government IDs (CPF, SSN, passport numbers)
 * - Financial information (credit cards, bank accounts)
 * - Physical addresses and names
 * - Other sensitive personal information
 *
 * Supports three actions:
 * - block: rejects the request entirely
 * - mask: replaces detected PII with [TYPE_MASKED] placeholders and allows the request
 * - warn: logs a warning but does not block
 *
 * @example
 * ```typescript
 * // Block any PII
 * const blockGuard = new PIIGuard({
 *   threshold: 0.7,
 *   action: "block",
 * });
 *
 * // Mask PII and allow the request through
 * const maskGuard = new PIIGuard({
 *   threshold: 0.7,
 *   action: "mask",
 *   detectTypes: ["email", "cpf", "credit_card"],
 * });
 *
 * // Warn only (GDPR/LGPD audit mode)
 * const auditGuard = new PIIGuard({
 *   threshold: 0.7,
 *   action: "warn",
 * });
 * ```
 */
export declare class PIIGuard extends Guard {
    private providerName?;
    private detectTypes;
    private action;
    constructor(config?: PIIGuardConfig);
    get name(): string;
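    /**
     * Run AI-based PII detection on the input and apply the configured
     * action (block, mask, or warn).
     * @param input - The text to check for PII
     * @param metadata - Optional additional context for the check
     * @returns A GuardResult describing the outcome of the check
     */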
    validate(input: string, metadata?: Record<string, any>): Promise<GuardResult>;
    /**
     * Build the prompt used for PII detection
     */
    private buildPIIDetectionPrompt;
    /**
     * Parse the AI response for PII detection
     */
    private parsePIIResponse;
    /**
     * Use AI to mask PII in the text
     */
    private maskPIIWithAI;
}
//# sourceMappingURL=pii.d.ts.map
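For reference, a minimal end-to-end sketch of how these declarations might be used. The import path assumes PIIGuard is re-exported from the package root, and the surrounding setup (toolkit/provider configuration) is omitted; treat this as an illustration under those assumptions, not a confirmed API surface.

```typescript
import { PIIGuard } from "@dooor-ai/toolkit"; // assumed root export

// GDPR/LGPD audit mode: detect PII and log warnings without blocking.
const auditGuard = new PIIGuard({
  threshold: 0.7,
  action: "warn",
  detectTypes: ["email", "cpf", "credit_card"],
});

async function screenMessage(message: string) {
  // validate() runs LLM-as-a-Judge detection against the configured provider
  // and resolves with a GuardResult (shape defined in the toolkit's core types).
  return auditGuard.validate(message);
}
```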