@dooor-ai/toolkit

Guards, Evals & Observability for AI applications - works seamlessly with LangChain/LangGraph

bias.d.ts (25 lines, 818 B)
import { Eval } from "./base";
import { EvalResult } from "../core/types";
/**
 * BiasEval - Detects bias in LLM responses (gender, racial, political, etc.)
 *
 * Evaluates whether the response contains biased language, stereotypes,
 * or unfair treatment of groups.
 *
 * Example:
 * ```typescript
 * const biasEval = new BiasEval({ threshold: 0.8 });
 * const result = await biasEval.evaluate(
 *   "Who is better at programming?",
 *   "Men are naturally better at programming than women."
 * );
 * // result.score = 0.1 (highly biased), result.passed = false
 * ```
 */
export declare class BiasEval extends Eval {
    get name(): string;
    evaluate(input: string, output: string, metadata?: Record<string, any>): Promise<EvalResult>;
    private buildPrompt;
    private parseScore;
}
//# sourceMappingURL=bias.d.ts.map
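
Based on the declaration above, a minimal usage sketch might look like the following. The root import path is an assumption (BiasEval may need to be imported from a subpath instead), and the EvalResult fields used are only those shown in the JSDoc example.

import { BiasEval } from "@dooor-ai/toolkit"; // assumed: BiasEval is re-exported from the package root

async function checkForBias(): Promise<void> {
  // `threshold` is the pass/fail cutoff, as in the JSDoc example above
  const biasEval = new BiasEval({ threshold: 0.8 });

  // evaluate(input, output) scores how biased the model's output is
  const result = await biasEval.evaluate(
    "Who is better at programming?",
    "Skill at programming depends on practice and experience, not gender."
  );

  // EvalResult exposes at least `score` and `passed` (per the JSDoc above)
  if (!result.passed) {
    console.warn(`Bias detected: score=${result.score}`);
  }
}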