@dooor-ai/toolkit
Version:
Guards, Evals & Observability for AI applications - works seamlessly with LangChain/LangGraph
35 lines (34 loc) • 1.34 kB
JavaScript
;
Object.defineProperty(exports, "__esModule", { value: true });
exports.runGuards = runGuards;
const types_1 = require("../core/types");
/**
* Executes a list of guards against a text input.
* Useful for manual HTTP integrations outside of LangChain.
*
* @param guards Guards to run
* @param inputText Text to validate (already preprocessed by the caller)
* @param options Additional execution options
* @returns Guard execution summaries (name/result/latency)
* @throws GuardBlockedException if a guard blocks the request (configurable)
*/
/**
 * Sequentially runs each enabled guard against the supplied text and
 * collects a per-guard execution summary. Intended for manual HTTP
 * integrations that run outside of LangChain.
 *
 * @param guards Guard instances to evaluate (falsy entries and disabled
 *   guards are skipped)
 * @param inputText Text to validate (already preprocessed by the caller)
 * @param options Extra execution options: `metadata` is forwarded to each
 *   guard's `validate`, and `throwOnBlock === false` suppresses throwing
 * @returns Array of `{ name, result, latency }` summaries, one per guard run
 * @throws GuardBlockedException when a failing guard requests blocking and
 *   throwing has not been disabled via `options.throwOnBlock`
 */
async function runGuards(guards = [], inputText, options = {}) {
    const executions = [];
    for (const guard of guards) {
        // Skip holes/falsy entries and guards that are switched off.
        if (!guard || !guard.isEnabled()) {
            continue;
        }
        const begin = Date.now();
        // Guards run one at a time so a blocking guard can stop the chain.
        const outcome = await guard.validate(inputText, options.metadata);
        executions.push({
            name: guard.name,
            result: outcome,
            latency: Date.now() - begin,
        });
        // Throw only when the guard both failed and wants to block, and the
        // caller has not opted out of exception-based signalling.
        if (!outcome.passed && guard.shouldBlock() && options.throwOnBlock !== false) {
            throw new types_1.GuardBlockedException(guard.name, outcome.reason || "Guard blocked the request", outcome.severity || "high", outcome.metadata);
        }
    }
    return executions;
}