@dooor-ai/toolkit
Version:
Guards, Evals & Observability for AI applications - works seamlessly with LangChain/LangGraph
101 lines (85 loc) ⢠3.16 kB
text/typescript
/**
* Multi-Provider Example
*
* This example demonstrates how dooorChatGuard works with ANY LangChain provider!
*
* Demonstrates:
* - Google Gemini (ChatGoogleGenerativeAI)
* - OpenAI (ChatOpenAI) - commented out
* - Anthropic (ChatAnthropic) - commented out
* - Same guards/evals work across all providers
*/
import { ChatGoogleGenerativeAI } from "@langchain/google-genai";
// Uncomment these if you want to test other providers:
// import { ChatOpenAI } from "@langchain/openai";
// import { ChatAnthropic } from "@langchain/anthropic";
import { dooorChatGuard } from "../src";
import { PromptInjectionGuard } from "../src/guards/prompt-injection";
import { LatencyEval } from "../src/evals/latency";
async function main() {
console.log("š DOOOR AI Toolkit - Multi-Provider Example\n");
console.log("Demonstrating that dooorChatGuard works with ANY LangChain provider!\n");
// Shared toolkit config
const toolkitConfig = {
apiKey: process.env.CORTEXDB_CONNECTION_STRING ||
"cortexdb://cortexdb_adm123@localhost:8000/my_evals",
providerName: "gemini",
project: "multi-provider-example",
guards: [new PromptInjectionGuard({ threshold: 0.7 })],
evals: [new LatencyEval({ threshold: 3000 })],
observability: true,
};
// Example 1: Google Gemini
console.log("š± Provider 1: Google Gemini");
const geminiBase = new ChatGoogleGenerativeAI({
model: "gemini-2.0-flash-exp",
apiKey: process.env.GEMINI_API_KEY,
temperature: 0,
});
const geminiLLM = dooorChatGuard(geminiBase, toolkitConfig);
try {
const response1 = await geminiLLM.invoke("What is 2+2?");
console.log("ā
Gemini Response:", response1.content);
} catch (error: any) {
console.error("ā Error:", error.message);
}
console.log("\n---\n");
// Example 2: OpenAI (uncomment to test)
/*
console.log("š¤ Provider 2: OpenAI GPT-4");
const openaiBase = new ChatOpenAI({
model: "gpt-4o-mini",
apiKey: process.env.OPENAI_API_KEY,
temperature: 0,
});
const openaiLLM = dooorChatGuard(openaiBase, toolkitConfig);
try {
const response2 = await openaiLLM.invoke("What is 2+2?");
console.log("ā
OpenAI Response:", response2.content);
} catch (error: any) {
console.error("ā Error:", error.message);
}
console.log("\n---\n");
*/
// Example 3: Anthropic Claude (uncomment to test)
/*
console.log("š§ Provider 3: Anthropic Claude");
const claudeBase = new ChatAnthropic({
model: "claude-3-5-sonnet-20241022",
apiKey: process.env.ANTHROPIC_API_KEY,
temperature: 0,
});
const claudeLLM = dooorChatGuard(claudeBase, toolkitConfig);
try {
const response3 = await claudeLLM.invoke("What is 2+2?");
console.log("ā
Claude Response:", response3.content);
} catch (error: any) {
console.error("ā Error:", error.message);
}
*/
console.log("\nā
Multi-provider example completed!");
console.log("š” The same guards, evals, and observability work across ALL providers!");
console.log("š No need for provider-specific implementations!");
}
// Run the example
main().catch(console.error);