/*
 * @dooor-ai/toolkit
 * Guards, Evals & Observability for AI applications — works seamlessly with LangChain/LangGraph.
 * Type declarations for the LangChain integration module.
 */
import { BaseChatModel } from "@langchain/core/language_models/chat_models";
import { ObservabilityCollector } from "../observability/logger";
import { ToolkitConfig } from "../core/types";
/**
 * Instruments any LangChain LLM with DOOOR Guards, Evals and Observability.
 *
 * This is the **recommended** way to use the DOOOR AI Toolkit — it works with
 * ANY provider (OpenAI, Google, Anthropic, Cohere, etc.).
 *
 * @param provider - Any LangChain BaseChatModel instance
 * @param toolkitConfig - DOOOR Toolkit configuration (guards, evals, observability)
 * @returns The provider instrumented with DOOOR callbacks
 *
 * @example
 * ```typescript
 * import { ChatGoogleGenerativeAI } from "@langchain/google-genai";
 * import { dooorChatGuard } from "@dooor-ai/toolkit";
 * import { PromptInjectionGuard, ToxicityGuard } from "@dooor-ai/toolkit/guards";
 * import { LatencyEval } from "@dooor-ai/toolkit/evals";
 *
 * // 1. Create your LangChain provider as usual
 * const baseProvider = new ChatGoogleGenerativeAI({
 *   model: "gemini-2.5-flash",
 *   apiKey: process.env.GEMINI_API_KEY,
 *   temperature: 0,
 * });
 *
 * // 2. Instrument it with the DOOOR Toolkit
 * const llm = dooorChatGuard(baseProvider, {
 *   apiKey: "cortexdb://cortexdb_adm123@35.223.201.25:8000/my_evals",
 *   providerName: "gemini-test",
 *   guards: [
 *     new PromptInjectionGuard(),
 *     new ToxicityGuard(), // Uses Gemini via CortexDB proxy
 *   ],
 *   evals: [new LatencyEval()],
 *   observability: true,
 * });
 *
 * // 3. Use it as usual — Guards + Evals run automatically!
 * const response = await llm.invoke("What is the capital of France?");
 * ```
 *
 * @example Works with ANY provider:
 * ```typescript
 * // OpenAI
 * const openai = new ChatOpenAI({ model: "gpt-4o", apiKey: "..." });
 * const llm1 = dooorChatGuard(openai, toolkitConfig);
 *
 * // Google
 * const google = new ChatGoogleGenerativeAI({ model: "gemini-2.5-flash", apiKey: "..." });
 * const llm2 = dooorChatGuard(google, toolkitConfig);
 *
 * // Anthropic
 * const claude = new ChatAnthropic({ model: "claude-3-5-sonnet", apiKey: "..." });
 * const llm3 = dooorChatGuard(claude, toolkitConfig);
 *
 * // ALL of them work the same way!
 * ```
 */
export declare function dooorChatGuard<T extends BaseChatModel>(provider: T, toolkitConfig: ToolkitConfig): T;
/**
 * Helper to access the ObservabilityCollector of an instrumented provider.
 *
 * @param provider - A BaseChatModel previously instrumented with {@link dooorChatGuard}
 * @returns The attached ObservabilityCollector, or `undefined` if the provider
 *          was not instrumented (or observability is not configured)
 */
export declare function getObservabilityCollector(provider: BaseChatModel): ObservabilityCollector | undefined;
/**
 * Helper to enable/disable observability at runtime.
 *
 * @param provider - A BaseChatModel previously instrumented with {@link dooorChatGuard}
 * @param enabled - `true` to turn observability on, `false` to turn it off
 */
export declare function setObservabilityEnabled(provider: BaseChatModel, enabled: boolean): void;
/**
 * Helper to extract the callbacks from an instrumented provider.
 * Use this to pass the callbacks down to LangGraph agents/graphs.
 *
 * @param provider - A BaseChatModel previously instrumented with {@link dooorChatGuard}
 * @returns The DOOOR callback handlers attached to the provider
 *
 * @example
 * ```typescript
 * const llm = dooorChatGuard(baseProvider, config);
 * const agent = createReactAgent({ llm, tools });
 *
 * // Pass the DOOOR callbacks to the agent
 * const result = await agent.invoke(input, {
 *   callbacks: getCallbacks(llm)
 * });
 * ```
 */
// NOTE(review): return type is `any[]`; presumably these are LangChain callback
// handlers (BaseCallbackHandler[] / Callbacks) — consider tightening the type in
// the implementation source, since `any[]` disables checking at every call site.
export declare function getCallbacks(provider: BaseChatModel): any[];
//# sourceMappingURL=langchain.d.ts.map