@dooor-ai/toolkit
Guards, Evals & Observability for AI applications - works seamlessly with LangChain/LangGraph
TypeScript
import { BaseCallbackHandler } from "@langchain/core/callbacks/base";
import { Guard } from "../guards/base";
import { Eval } from "../evals/base";
import { ObservabilityCollector } from "../observability/logger";
interface DOOORCallbackConfig {
guards?: Guard[];
evals?: Eval[];
outputGuards?: Guard[];
observability?: ObservabilityCollector;
evalMode?: "async" | "sync" | "sample";
evalSampleRate?: number;
guardFailureMode?: "throw" | "return_error" | "log_only";
modelName?: string;
}
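/**
 * Example (illustrative sketch, not part of the declarations): a config of
 * this shape is passed to the DOOORCallbackHandler constructor below. The
 * guard/eval instances (`piiGuard`, `toxicityGuard`, `relevanceEval`) are
 * hypothetical placeholders for concrete Guard/Eval implementations.
 *
 *   const handler = new DOOORCallbackHandler({
 *     guards: [piiGuard],            // presumably checked against prompts before the LLM runs
 *     outputGuards: [toxicityGuard], // presumably checked against the completion
 *     evals: [relevanceEval],
 *     evalMode: "sample",
 *     evalSampleRate: 0.1,           // evaluate roughly 10% of traces
 *     guardFailureMode: "throw",     // a failing guard aborts the run
 *     modelName: "gpt-4o",
 *   });
 */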
/**
* Callback handler that implements guards, evals, and observability
*/
export declare class DOOORCallbackHandler extends BaseCallbackHandler {
name: string;
private guards;
private evals;
private outputGuards;
private observability?;
private evalMode;
private evalSampleRate;
private guardFailureMode;
private defaultModelName;
private currentModel?;
private currentTraceId?;
private currentInput?;
private startTime?;
private guardsResults;
private toolCalls;
private pendingToolCalls;
private currentToolStart?;
private toolCallsByTrace;
private currentSessionId?;
private traceSequence;
constructor(config: DOOORCallbackConfig);
/**
* Called before LLM starts (LangChain lifecycle hook)
*/
handleLLMStart(llm: {
name?: string;
}, prompts: string[], runId: string, _parentRunId?: string, _extraParams?: Record<string, any>, _tags?: string[], _metadata?: Record<string, any>, _runName?: string): Promise<void>;
/**
* Backwards compatibility with LangChain < 0.3 which calls onLLMStart
*/
onLLMStart(llm: {
name?: string;
}, prompts: string[], runId: string): Promise<void>;
private processLLMStart;
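/**
 * Illustrative sketch (behavior inferred from guardFailureMode, not from the
 * hidden implementation): with "throw", a failing input guard is expected to
 * reject the run, so callers typically wrap the invocation:
 *
 *   try {
 *     await model.invoke(prompt, { callbacks: [handler] });
 *   } catch (err) {
 *     // a guard raised during handleLLMStart / handleLLMEnd
 *   }
 *
 * With "return_error" or "log_only", the call presumably proceeds and the
 * failure is recorded via the observability collector instead.
 */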
/**
* Called after LLM completes (LangChain lifecycle hook)
*/
handleLLMEnd(output: any, runId: string, _parentRunId?: string, _tags?: string[], _extraParams?: Record<string, any>): Promise<void>;
/**
* Backwards compatibility with LangChain < 0.3 which calls onLLMEnd
*/
onLLMEnd(output: any, runId: string): Promise<void>;
private processLLMEnd;
/**
* Extract tool calls from LLM output (for LangGraph agents where tool hooks aren't called)
*/
private extractToolCallsFromOutput;
/**
* Extract tool output from input (for LangGraph agents where tool result is in next LLM input)
*/
private extractToolOutputFromInput;
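/**
 * LangGraph sketch (illustrative): with prebuilt agents such as
 * createReactAgent from @langchain/langgraph, the tool lifecycle hooks are
 * not always called, which is why the two extraction helpers above recover
 * tool calls and results from the LLM messages. The handler is still passed
 * the same way:
 *
 *   const agent = createReactAgent({ llm: model, tools });
 *   await agent.invoke(
 *     { messages: [{ role: "user", content: "What's the weather in Lisbon?" }] },
 *     { callbacks: [handler] },
 *   );
 */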
/**
* Called when a tool starts execution (LangChain lifecycle hook)
*/
handleToolStart(tool: {
name?: string;
id?: string[];
}, input: string, runId: string): Promise<void>;
/**
* Backwards compatibility - some versions call onToolStart
*/
onToolStart(tool: {
name?: string;
id?: string[];
}, input: string, runId: string): Promise<void>;
/**
* Called when a tool finishes execution (LangChain lifecycle hook)
*/
handleToolEnd(output: string, runId: string): Promise<void>;
/**
* Backwards compatibility - some versions call onToolEnd
*/
onToolEnd(output: string, runId: string): Promise<void>;
/**
* Called when a tool errors (LangChain lifecycle hook)
*/
handleToolError(error: Error, runId: string): Promise<void>;
/**
* Backwards compatibility - some versions call onToolError
*/
onToolError(error: Error, runId: string): Promise<void>;
/**
* Persist tool call updates to observability backend
*/
private persistToolCallUpdate;
/**
* Called on LLM error (LangChain lifecycle hook)
*/
handleLLMError(error: Error, runId: string, _parentRunId?: string, _tags?: string[], _extraParams?: Record<string, any>): Promise<void>;
/**
* Backwards compatibility with LangChain < 0.3 which calls onLLMError
*/
onLLMError(error: Error, runId: string): Promise<void>;
/**
* Run evals asynchronously (non-blocking)
*/
private runEvalsAsync;
/**
* Run evals synchronously (blocking)
*/
private runEvalsSync;
/**
* Determine if evals should run (sampling logic)
*/
private shouldRunEvals;
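/**
 * Sampling sketch (assumption; the private logic is not shown in these
 * declarations): with evalMode "sample", a random draw against
 * evalSampleRate likely decides whether evals run for a given trace, e.g.
 *
 *   if (this.evalMode === "sample") return Math.random() < this.evalSampleRate;
 *   return true; // "async" and "sync" presumably always evaluate
 */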
/**
* Extract text output from LangChain response
*/
private extractOutputText;
/**
* Extract token usage from LangChain response
*/
private extractTokens;
}
export {};
//# sourceMappingURL=callback.d.ts.map
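A minimal end-to-end sketch of wiring the handler into a LangChain chat model. It assumes DOOORCallbackHandler is re-exported from the package root and that concrete Guard/Eval implementations are supplied where the arrays are left empty; the ChatOpenAI model and the per-call callbacks option follow the standard @langchain/core callback API.

import { ChatOpenAI } from "@langchain/openai";
import { DOOORCallbackHandler } from "@dooor-ai/toolkit"; // assumes a root re-export

// Concrete Guard/Eval classes are not part of this declaration file; fill the
// arrays with whichever implementations the toolkit ships.
const handler = new DOOORCallbackHandler({
  guards: [],
  evals: [],
  evalMode: "async",          // evals run without blocking the response
  guardFailureMode: "throw",  // a failing guard rejects the call
  modelName: "gpt-4o-mini",
});

const model = new ChatOpenAI({ model: "gpt-4o-mini" });

async function main() {
  // Per-invocation callbacks reach handleLLMStart/handleLLMEnd and the tool hooks.
  const response = await model.invoke("Summarize our refund policy.", {
    callbacks: [handler],
  });
  console.log(response.content);
}

main();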