
@dooor-ai/toolkit

Guards, Evals & Observability for AI applications - works seamlessly with LangChain/LangGraph

import { Eval } from "./base";
import { EvalResult, EvalConfig } from "../core/types";
import { getCortexDBClient, getGlobalProviderName } from "../observability/cortexdb-client";

/**
 * AnswerRelevancyEval - Measures if the LLM's answer is relevant to the question
 *
 * Uses LLM-as-judge to score relevancy on a 0-1 scale.
 *
 * Example:
 * ```typescript
 * const relevancyEval = new AnswerRelevancyEval({ threshold: 0.7 });
 * const result = await relevancyEval.evaluate(
 *   "What is the capital of France?",
 *   "The capital of France is Paris."
 * );
 * // result.score = 0.95, result.passed = true
 * ```
 */
export class AnswerRelevancyEval extends Eval {
  get name(): string {
    return "AnswerRelevancyEval";
  }

  async evaluate(
    input: string,
    output: string,
    metadata?: Record<string, any>
  ): Promise<EvalResult> {
    const startTime = Date.now();

    try {
      // Use CortexDB AI proxy for evaluation
      const cortexClient = getCortexDBClient();
      const providerName = getGlobalProviderName();

      const prompt = this.buildPrompt(input, output);

      const response = await cortexClient.invokeAI({
        prompt,
        usage: "evaluation",
        providerName: providerName || undefined,
        temperature: 0.0, // Deterministic for consistency
        maxTokens: 200,
      });

      const score = this.parseScore(response.text);
      const passed = score >= this.getThreshold();

      return {
        name: this.name,
        score,
        passed,
        details: `Answer relevancy score: ${score.toFixed(2)}. ${passed ? "PASSED" : "FAILED"} (threshold: ${this.getThreshold()})`,
        metadata: {
          latency: Date.now() - startTime,
          judgeResponse: response.text,
        },
        timestamp: new Date(),
      };
    } catch (error) {
      console.error("AnswerRelevancyEval failed:", error);
      return {
        name: this.name,
        score: 0.5,
        passed: false,
        details: `Eval failed: ${error instanceof Error ? error.message : "Unknown error"}`,
        metadata: {
          error: String(error),
          latency: Date.now() - startTime,
        },
        timestamp: new Date(),
      };
    }
  }

  private buildPrompt(question: string, answer: string): string {
    return `You are an expert evaluator. Your task is to assess if the answer is relevant to the question.

Question: "${question}"

Answer: "${answer}"

Rate the relevancy on a scale of 0.0 to 1.0:
- 1.0 = Perfectly relevant, directly addresses the question
- 0.7-0.9 = Mostly relevant, addresses core question with some tangents
- 0.4-0.6 = Partially relevant, touches on topic but misses key points
- 0.0-0.3 = Not relevant, does not address the question

Output ONLY a JSON object in this exact format:
{
  "score": 0.85,
  "reasoning": "Brief explanation of the score"
}`;
  }

  private parseScore(response: string): number {
    try {
      // Try to extract JSON from response
      const jsonMatch = response.match(/\{[\s\S]*\}/);
      if (jsonMatch) {
        const parsed = JSON.parse(jsonMatch[0]);
        if (typeof parsed.score === "number") {
          return Math.max(0, Math.min(1, parsed.score));
        }
      }

      // Fallback: look for decimal number
      const numberMatch = response.match(/\b0?\.\d+\b|\b1\.0\b|\b[01]\b/);
      if (numberMatch) {
        return Math.max(0, Math.min(1, parseFloat(numberMatch[0])));
      }

      console.warn("Could not parse score from response:", response);
      return 0.5; // Default middle score on parse failure
    } catch (error) {
      console.error("Error parsing score:", error);
      return 0.5;
    }
  }
}
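
A minimal sketch of driving this eval end to end, assuming the package's root entry point re-exports AnswerRelevancyEval (the import path below is illustrative) and that the global CortexDB client and provider consumed by getCortexDBClient()/getGlobalProviderName() have already been configured elsewhere in the application:

```typescript
// Sketch only: the "@dooor-ai/toolkit" import path is assumed; the CortexDB
// AI proxy must already be registered globally for evaluate() to succeed.
import { AnswerRelevancyEval } from "@dooor-ai/toolkit";

async function gradeAnswer(question: string, answer: string) {
  const relevancy = new AnswerRelevancyEval({ threshold: 0.7 });
  const result = await relevancy.evaluate(question, answer);

  // score is clamped to [0, 1] by parseScore(); if the judge call or parsing
  // fails, the eval falls back to a neutral 0.5 with passed = false.
  console.log(`${result.name}: ${result.score.toFixed(2)} (${result.passed ? "PASSED" : "FAILED"})`);
  console.log(result.details);
}

gradeAnswer(
  "What is the capital of France?",
  "The capital of France is Paris."
).catch(console.error);
```

Note the design choices in evaluate(): the judge call runs with temperature 0.0 so repeated runs on the same question/answer pair score as consistently as the underlying model allows, and failures degrade to a non-passing 0.5 rather than throwing, so a batch of evals can complete even if one judge call errors.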