@dooor-ai/toolkit

Guards, Evals & Observability for AI applications - works seamlessly with LangChain/LangGraph

latency.d.ts (28 lines, 918 B)
import { Eval } from "./base";
import { EvalResult, EvalConfig } from "../core/types";
interface LatencyEvalConfig extends EvalConfig {
    /** Threshold in milliseconds (default: 3000ms = 3s) */
    thresholdMs?: number;
    /** Target latency for scoring (default: 1000ms = 1s) */
    targetMs?: number;
    /** Max latency before score is 0 (default: 10000ms = 10s) */
    maxMs?: number;
}
/**
 * Evaluates the latency of LLM responses
 *
 * Score calculation:
 * - latency <= target: score = 1.0
 * - target < latency < max: linear scale from 1.0 to 0.0
 * - latency >= max: score = 0.0
 */
export declare class LatencyEval extends Eval {
    private thresholdMs;
    private targetMs;
    private maxMs;
    constructor(config?: LatencyEvalConfig);
    get name(): string;
    evaluate(input: string, output: string, metadata?: Record<string, any>): EvalResult;
}
export {};
//# sourceMappingURL=latency.d.ts.map
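
The doc comment above describes a piecewise-linear score. The following is a minimal sketch of that calculation, not the package's actual implementation; the helper name scoreLatency and the sample values are assumptions for illustration only.

// Sketch of the piecewise-linear scoring described in the doc comment.
// scoreLatency is a hypothetical helper, not part of @dooor-ai/toolkit.
function scoreLatency(latencyMs: number, targetMs = 1000, maxMs = 10000): number {
    if (latencyMs <= targetMs) return 1.0; // at or under target: full score
    if (latencyMs >= maxMs) return 0.0;    // at or over max: zero
    // Linear interpolation from 1.0 (at target) down to 0.0 (at max)
    return 1.0 - (latencyMs - targetMs) / (maxMs - targetMs);
}

// Example: with the defaults, 5500 ms is halfway between target (1000 ms)
// and max (10000 ms), so the score is 0.5.
console.log(scoreLatency(5500)); // 0.5

In the actual class, LatencyEval.evaluate presumably reads the measured latency from the metadata argument and returns it wrapped in an EvalResult; the exact metadata key is not documented in this file.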