// @dooor-ai/toolkit
// Version:
// Guards, Evals & Observability for AI applications - works seamlessly with LangChain/LangGraph
// 229 lines (205 loc) • 8.38 kB
// text/typescript
import { BaseChatModel } from "@langchain/core/language_models/chat_models";
import { DOOORCallbackHandler } from "../core/callback";
import { Guard } from "../guards/base";
import { Eval } from "../evals/base";
import { ObservabilityCollector, ConsoleBackend, CortexDBBackend } from "../observability/logger";
import { configureCortexDBFromConnectionString } from "../observability/cortexdb-client";
import { ToolkitConfig } from "../core/types";
/**
 * Instruments any LangChain LLM with DOOOR Guards, Evals and Observability.
 *
 * This is the **recommended** way to use the DOOOR AI Toolkit — it works with
 * ANY provider (OpenAI, Google, Anthropic, Cohere, etc).
 *
 * @param provider - Any LangChain BaseChatModel instance
 * @param toolkitConfig - DOOOR Toolkit configuration (guards, evals, observability)
 * @returns The same provider instance, instrumented with DOOOR callbacks
 *
 * @example
 * ```typescript
 * import { ChatGoogleGenerativeAI } from "@langchain/google-genai";
 * import { dooorChatGuard } from "@dooor-ai/toolkit";
 * import { PromptInjectionGuard, ToxicityGuard } from "@dooor-ai/toolkit/guards";
 * import { LatencyEval } from "@dooor-ai/toolkit/evals";
 *
 * // 1. Create your LangChain provider as usual
 * const baseProvider = new ChatGoogleGenerativeAI({
 *   model: "gemini-2.5-flash",
 *   apiKey: process.env.GEMINI_API_KEY,
 *   temperature: 0,
 * });
 *
 * // 2. Instrument it with the DOOOR Toolkit
 * const llm = dooorChatGuard(baseProvider, {
 *   apiKey: "cortexdb://cortexdb_adm123@35.223.201.25:8000/my_evals",
 *   providerName: "gemini-test",
 *   guards: [
 *     new PromptInjectionGuard(),
 *     new ToxicityGuard(), // Uses Gemini via CortexDB proxy
 *   ],
 *   evals: [new LatencyEval()],
 *   observability: true,
 * });
 *
 * // 3. Use it as usual — Guards + Evals run automatically!
 * const response = await llm.invoke("What is the capital of France?");
 * ```
 *
 * @example Works with ANY provider:
 * ```typescript
 * // OpenAI
 * const openai = new ChatOpenAI({ model: "gpt-4o", apiKey: "..." });
 * const llm1 = dooorChatGuard(openai, toolkitConfig);
 *
 * // Google
 * const google = new ChatGoogleGenerativeAI({ model: "gemini-2.5-flash", apiKey: "..." });
 * const llm2 = dooorChatGuard(google, toolkitConfig);
 *
 * // Anthropic
 * const claude = new ChatAnthropic({ model: "claude-3-5-sonnet", apiKey: "..." });
 * const llm3 = dooorChatGuard(claude, toolkitConfig);
 *
 * // ALL of them work the same way!
 * ```
 */
export function dooorChatGuard<T extends BaseChatModel>(
  provider: T,
  toolkitConfig: ToolkitConfig
): T {
  console.log("[dooorChatGuard] Called with config:", {
    hasApiKey: !!toolkitConfig?.apiKey,
    providerName: toolkitConfig?.providerName,
    project: toolkitConfig?.project,
    observability: toolkitConfig?.observability,
    guardsCount: toolkitConfig?.guards?.length ?? 0,
    evalsCount: toolkitConfig?.evals?.length ?? 0,
  });

  // Configure CortexDB if a connection string was provided (via apiKey).
  if (toolkitConfig?.apiKey) {
    console.log("[dooorChatGuard] Configuring CortexDB with connection string");
    configureCortexDBFromConnectionString(
      toolkitConfig.apiKey,
      toolkitConfig.providerName,
      toolkitConfig.project
    );
  }

  // Setup observability backend:
  // - CortexDBBackend when CortexDB is configured (apiKey provided)
  // - ConsoleBackend as a fallback for local development
  const backend = toolkitConfig?.apiKey
    ? new CortexDBBackend(toolkitConfig.project)
    : new ConsoleBackend(true);
  console.log("[dooorChatGuard] Observability backend:", backend.constructor.name);

  // Observability is opt-out: only an explicit `observability: false` disables it.
  const observabilityCollector =
    toolkitConfig?.observability !== false
      ? new ObservabilityCollector(backend)
      : undefined;
  console.log("[dooorChatGuard] ObservabilityCollector created:", !!observabilityCollector);

  // Extract model name from provider (internal property names vary per provider).
  // @ts-ignore - accessing internal properties
  const modelName = provider.modelName || provider.model || toolkitConfig.providerName || "unknown";
  console.log("[dooorChatGuard] Detected model name:", modelName);

  // Create DOOOR callback handler.
  // BUG FIX: use `??` (not `||`) for evalSampleRate so an explicit rate of 0
  // ("never sample") is honored instead of being silently bumped to 1.0.
  // `??` is also the correct operator for the other optional fields.
  const dooorHandler = new DOOORCallbackHandler({
    guards: (toolkitConfig?.guards as Guard[]) ?? [],
    evals: (toolkitConfig?.evals as Eval[]) ?? [],
    outputGuards: (toolkitConfig?.outputGuards as Guard[]) ?? [],
    observability: observabilityCollector,
    evalMode: toolkitConfig?.evalMode ?? "async",
    evalSampleRate: toolkitConfig?.evalSampleRate ?? 1.0,
    guardFailureMode: toolkitConfig?.guardFailureMode ?? "throw",
    modelName: modelName,
  });

  // Store callback handler for retrieval via getCallbacks()
  const callbacksList = [dooorHandler];
  console.log("[dooorChatGuard] DOOOR callback handler created");

  // Attach observability collector to provider for runtime access
  // @ts-ignore - adding custom property
  provider.__dooorObservability = observabilityCollector;
  // @ts-ignore - attach callbacks list for easy access in invoke config
  provider.__dooorCallbacks = callbacksList;

  // IMPORTANT: Add callbacks to provider.callbacks so LangGraph agents propagate them automatically
  // This allows createReactAgent() to work WITHOUT needing getCallbacks() explicitly
  // @ts-ignore - provider.callbacks exists on BaseChatModel
  provider.callbacks = [...(provider.callbacks || []), ...callbacksList];
  console.log("[dooorChatGuard] Callbacks added to provider.callbacks for automatic propagation");

  // Override invoke to automatically inject callbacks and handle RAG
  const originalInvoke = provider.invoke.bind(provider);
  // @ts-ignore
  provider.invoke = async function (input: any, options?: any) {
    console.log("[dooorChatGuard] invoke intercepted, injecting callbacks");
    console.log("[dooorChatGuard] Input type:", typeof input, Array.isArray(input) ? "array" : "");
    console.log("[dooorChatGuard] Input:", JSON.stringify(input).substring(0, 200));

    // Handle RAG context if provided
    if (options?.ragContext) {
      console.log("[dooorChatGuard] RAG context detected, processing...");
      // RAG will be handled by the gateway when we call invokeAI.
      // Store RAG context for callback handler to use.
      // @ts-ignore
      provider.__dooorRagContext = options.ragContext;
    }

    const mergedOptions = {
      ...options,
      callbacks: [...(options?.callbacks || []), ...callbacksList],
    };
    console.log("[dooorChatGuard] Calling with callbacks:", {
      count: mergedOptions.callbacks.length,
      types: mergedOptions.callbacks.map((cb: any) => cb.constructor?.name),
    });

    console.log("[dooorChatGuard] Calling originalInvoke...");
    try {
      const result = await originalInvoke(input, mergedOptions);
      console.log("[dooorChatGuard] originalInvoke completed");
      console.log("[dooorChatGuard] Result type:", typeof result);
      return result;
    } finally {
      // BUG FIX: clean up RAG context even when originalInvoke throws, so a
      // stale context can never leak into the next call.
      // @ts-ignore
      delete provider.__dooorRagContext;
    }
  };

  return provider;
}
/**
 * Helper to access the ObservabilityCollector of an instrumented provider.
 *
 * @param provider - A provider previously passed through dooorChatGuard
 * @returns The attached collector, or undefined if the provider was never instrumented
 */
export function getObservabilityCollector(
  provider: BaseChatModel
): ObservabilityCollector | undefined {
  // @ts-ignore - __dooorObservability is a custom property attached by dooorChatGuard
  const collector = provider.__dooorObservability;
  return collector;
}
/**
 * Helper to enable/disable observability at runtime.
 *
 * No-op when the provider has no attached collector (i.e. it was never
 * instrumented with dooorChatGuard).
 *
 * @param provider - An instrumented provider
 * @param enabled - true to enable observability, false to disable it
 */
export function setObservabilityEnabled(
  provider: BaseChatModel,
  enabled: boolean
): void {
  getObservabilityCollector(provider)?.setEnabled(enabled);
}
/**
 * Helper to extract the callbacks from an instrumented provider.
 * Use this to pass callbacks to LangGraph agents/graphs.
 *
 * @example
 * ```typescript
 * const llm = dooorChatGuard(baseProvider, config);
 * const agent = createReactAgent({ llm, tools });
 *
 * // Forward the DOOOR callbacks to the agent
 * const result = await agent.invoke(input, {
 *   callbacks: getCallbacks(llm)
 * });
 * ```
 */
export function getCallbacks(provider: BaseChatModel): any[] {
  // Prefer the list attached by dooorChatGuard, then the provider's own
  // callbacks, then an empty list.
  // @ts-ignore - __dooorCallbacks is a custom property attached by dooorChatGuard
  const attached = provider.__dooorCallbacks;
  // @ts-ignore - provider.callbacks exists on BaseChatModel
  const callbacks = attached || provider.callbacks || [];
  const summary = {
    count: callbacks.length,
    types: callbacks.map((cb: any) => cb.constructor?.name || typeof cb),
  };
  console.log("[getCallbacks] Returning callbacks:", summary);
  return callbacks;
}