@dooor-ai/toolkit
Version:
Guards, Evals & Observability for AI applications - works seamlessly with LangChain/LangGraph
61 lines (52 loc) • 1.74 kB
text/typescript
import { Guard } from "./base";
import { GuardBlockedException, GuardExecutionSummary } from "../core/types";
/**
 * Options accepted by {@link runGuards}.
 */
export interface RunGuardsOptions {
  /**
   * Arbitrary metadata forwarded verbatim to each guard's `validate` call
   * (e.g. user/session info). Guards decide how to interpret it.
   */
  metadata?: Record<string, any>;
  /**
   * When false, guards won't throw GuardBlockedException on failure.
   * Consumers can inspect the returned results to decide what to do.
   * Defaults to true (throws). Note: any value other than the literal
   * `false` — including `undefined` — keeps throwing enabled.
   */
  throwOnBlock?: boolean;
}
/**
 * Runs each guard in sequence against a text input.
 * Useful for manual HTTP integrations outside of LangChain.
 *
 * Guards that are null/undefined or report themselves disabled are skipped
 * and produce no summary entry. Per-guard wall-clock latency is recorded.
 *
 * @param guards Guards to execute, in order
 * @param inputText Text to validate (already preprocessed by the caller)
 * @param options Extra execution options (metadata, throw behavior)
 * @returns One execution summary (name/result/latency) per executed guard
 * @throws GuardBlockedException when a failing guard blocks the request and
 *         `options.throwOnBlock` is not explicitly `false`
 */
export async function runGuards(
  guards: Guard[] = [],
  inputText: string,
  options: RunGuardsOptions = {}
): Promise<GuardExecutionSummary[]> {
  const executed: GuardExecutionSummary[] = [];
  // Only the literal `false` disables throwing; undefined keeps the default.
  const mayThrow = options.throwOnBlock !== false;

  for (const guard of guards) {
    // Skip holes in the array and guards that are switched off.
    if (!guard?.isEnabled()) continue;

    const begin = Date.now();
    const outcome = await guard.validate(inputText, options.metadata);

    executed.push({
      name: guard.name,
      result: outcome,
      latency: Date.now() - begin,
    });

    if (mayThrow && !outcome.passed && guard.shouldBlock()) {
      throw new GuardBlockedException(
        guard.name,
        outcome.reason || "Guard blocked the request",
        outcome.severity || "high",
        outcome.metadata
      );
    }
  }

  return executed;
}