@dooor-ai/toolkit
Guards, Evals & Observability for AI applications - works seamlessly with LangChain/LangGraph
TypeScript
import { Eval } from "./base";
import { EvalResult, EvalConfig } from "../core/types";
export interface HallucinationConfig extends EvalConfig {
/** Context to check the output against (optional; without it, the eval checks internal consistency and factual plausibility) */
context?: string;
}
/**
* HallucinationEval - Detects when the LLM makes up facts or unsupported claims
*
* If context is provided, checks if answer contains claims not in context.
* If no context, evaluates internal consistency and factual plausibility.
*
* Example:
* ```typescript
* const hallucinationEval = new HallucinationEval({
*   threshold: 0.8,
*   context: "The Eiffel Tower is 330 meters tall."
* });
* const result = await hallucinationEval.evaluate(
*   "How tall is the Eiffel Tower?",
*   "The Eiffel Tower is 450 meters tall and was built in 1950."
* );
* // result.score = 0.2 (high hallucination), result.passed = false
* ```
*/
export declare class HallucinationEval extends Eval {
private context?;
constructor(config?: HallucinationConfig);
get name(): string;
/**
* Set context dynamically
*/
setContext(context: string): void;
evaluate(input: string, output: string, metadata?: Record<string, any>): Promise<EvalResult>;
private buildPromptWithContext;
private buildPromptWithoutContext;
private parseScore;
}
//# sourceMappingURL=hallucination.d.ts.map
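
Usage sketch (not part of the declaration file above): a minimal example of driving the eval with a dynamically supplied context, assuming HallucinationEval and EvalResult are re-exported from the @dooor-ai/toolkit package root and that evaluate performs an LLM-backed judgment. The checkAnswer helper and retrievedChunk parameter are illustrative, not part of the toolkit API.

```typescript
import { HallucinationEval } from "@dooor-ai/toolkit";
import type { EvalResult } from "@dooor-ai/toolkit";

// Hypothetical helper: grade an answer, optionally against a retrieved chunk.
async function checkAnswer(
  question: string,
  answer: string,
  retrievedChunk?: string
): Promise<EvalResult> {
  // threshold is the minimum score required for result.passed to be true.
  const hallucinationEval = new HallucinationEval({ threshold: 0.8 });

  if (retrievedChunk) {
    // With context, the eval checks whether the answer makes claims
    // that are not supported by the provided text.
    hallucinationEval.setContext(retrievedChunk);
  }
  // Without context, it falls back to internal consistency and plausibility.

  const result = await hallucinationEval.evaluate(question, answer);
  if (!result.passed) {
    console.warn(`Possible hallucination (score ${result.score}): ${answer}`);
  }
  return result;
}
```

Setting the context per call via setContext lets one eval instance be reused across a batch of RAG responses, rather than constructing a new HallucinationEval for each retrieved passage.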