@dooor-ai/toolkit
Version:
Guards, Evals & Observability for AI applications - works seamlessly with LangChain/LangGraph
58 lines (57 loc) • 1.87 kB
JavaScript
;
// Compiled CommonJS output (presumably emitted by TypeScript — TODO confirm).
// Mark the module as ES-module interop and pre-initialize the export slot so
// consumers resolving this module mid-cycle see the key before the class is defined.
Object.defineProperty(exports, "__esModule", { value: true });
exports.LatencyEval = void 0;
// Base Eval class providing the shared evaluator contract (name/evaluate).
const base_1 = require("./base");
/**
 * Evaluates the latency of LLM responses.
 *
 * Score calculation:
 * - latency <= target: score = 1.0
 * - target < latency < max: linear scale from 1.0 to 0.0
 * - latency >= max: score = 0.0
 */
class LatencyEval extends base_1.Eval {
    /**
     * @param {object} [config] - Optional thresholds (all in milliseconds).
     * @param {number} [config.thresholdMs=3000] - Pass/fail cutoff.
     * @param {number} [config.targetMs=1000] - Latency at or below this scores 1.0.
     * @param {number} [config.maxMs=10000] - Latency at or above this scores 0.0.
     */
    constructor(config = {}) {
        super(config);
        // Defaults: 3s pass threshold, 1s ideal target, 10s hard ceiling.
        this.thresholdMs = config.thresholdMs ?? 3000;
        this.targetMs = config.targetMs ?? 1000;
        this.maxMs = config.maxMs ?? 10000;
    }
    /** @returns {string} The evaluator's display name. */
    get name() {
        return "LatencyEval";
    }
    /**
     * Scores a single response by its observed latency.
     *
     * @param {*} input - Unused by this evaluator.
     * @param {*} output - Unused by this evaluator.
     * @param {object} [metadata] - Expected to carry `latency` in ms; missing
     *   latency is treated as 0 (i.e. a perfect score).
     * @returns {object} Result with name, score in [0, 1], pass flag, details,
     *   echoed thresholds, and a timestamp.
     */
    evaluate(input, output, metadata) {
        const latencyMs = metadata?.latency ?? 0;
        // Piecewise score: flat 1.0 up to target, flat 0.0 from max,
        // linear descent in between.
        let rawScore;
        if (latencyMs <= this.targetMs) {
            rawScore = 1.0;
        } else if (latencyMs >= this.maxMs) {
            rawScore = 0.0;
        } else {
            rawScore = 1.0 - (latencyMs - this.targetMs) / (this.maxMs - this.targetMs);
        }
        const withinThreshold = latencyMs <= this.thresholdMs;
        return {
            name: this.name,
            // Defensive clamp to [0, 1]; the branches above already stay in range.
            score: Math.min(1, Math.max(0, rawScore)),
            passed: withinThreshold,
            details: withinThreshold
                ? `Latency ${latencyMs}ms is acceptable (threshold: ${this.thresholdMs}ms)`
                : `Latency ${latencyMs}ms exceeds threshold of ${this.thresholdMs}ms`,
            metadata: {
                latency: latencyMs,
                thresholdMs: this.thresholdMs,
                targetMs: this.targetMs,
                maxMs: this.maxMs,
            },
            timestamp: new Date(),
        };
    }
}
exports.LatencyEval = LatencyEval;