@genkit-ai/checks

Google Checks AI Safety plugin for classifying text against Checks AI safety policies.

import { Guardrails } from "./guardrails";

function checksMiddleware(options) {
  const guardrails = new Guardrails(options.auth, options?.projectId);

  // Classify a piece of text with the Checks Guardrails API and return only
  // the policies it violates.
  const classifyContent = async (content) => {
    const response = await guardrails.classifyContent(content, options.metrics);
    const violatedPolicies = response.policyResults.filter(
      (policy) => policy.violationResult === "VIOLATIVE"
    );
    return violatedPolicies;
  };

  return async (req, next) => {
    // Check every text part of the request; if any configured policy is
    // violated, block before the model is ever called.
    for (const message of req.messages) {
      for (const content of message.content) {
        if (content.text) {
          const violatedPolicies = await classifyContent(content.text);
          if (violatedPolicies.length > 0) {
            return {
              finishReason: "blocked",
              finishMessage: `Model input violated Checks policies: [${violatedPolicies.map((result) => result.policyType).join(" ")}], further processing blocked.`
            };
          }
        }
      }
    }

    // Otherwise run the model, then check every text part of each candidate
    // and block the output if it violates any configured policy.
    const generatedContent = await next(req);
    for (const candidate of generatedContent.candidates ?? []) {
      for (const content of candidate.message.content ?? []) {
        if (content.text) {
          const violatedPolicies = await classifyContent(content.text);
          if (violatedPolicies.length > 0) {
            return {
              finishReason: "blocked",
              finishMessage: `Model output violated Checks policies: [${violatedPolicies.map((result) => result.policyType).join(" ")}], output blocked.`
            };
          }
        }
      }
    }
    return generatedContent;
  };
}

export { checksMiddleware };
//# sourceMappingURL=middleware.mjs.map
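For context, here is a rough usage sketch, not taken from this file. It assumes Genkit's generate() accepts model middleware via its `use` option, and that the options object matches what the compiled code above reads (auth, projectId, metrics); the model name, project ID, and metric string are placeholders, and auth is assumed to fall back to default Google Cloud credentials when omitted.

// Illustrative sketch only: wiring checksMiddleware into a Genkit generate() call.
import { genkit } from "genkit";
import { googleAI } from "@genkit-ai/googleai";
import { checksMiddleware } from "@genkit-ai/checks";

const ai = genkit({ plugins: [googleAI()] });

const response = await ai.generate({
  model: "googleai/gemini-2.0-flash",      // placeholder model reference
  prompt: "Summarize this user message...",
  use: [
    checksMiddleware({
      projectId: "my-project-id",          // placeholder Google Cloud project
      metrics: ["DANGEROUS_CONTENT"],      // placeholder Checks policy type
      // auth omitted: assumed to use default credentials
    }),
  ],
});

// If the prompt or the model output violated a configured policy, the
// middleware returns finishReason "blocked" and finishMessage lists the
// violated policy types (assumed to surface on the response object).
console.log(response.finishReason, response.finishMessage);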