UNPKG

@genkit-ai/checks

Version:

Google Checks AI Safety plugin for classifying text against Checks AI safety policies.

130 lines 4.07 kB
"use strict";

// ---- esbuild-generated CommonJS interop helpers ----
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __hasOwnProp = Object.prototype.hasOwnProperty;
// Define a lazy, enumerable getter on `target` for every key in `all`.
var __export = (target, all) => {
  for (var name in all)
    __defProp(target, name, { get: all[name], enumerable: true });
};
// Copy own properties from `from` onto `to` (skipping `except` and keys
// already present), preserving enumerability via getter descriptors.
var __copyProps = (to, from, except, desc) => {
  if (from && typeof from === "object" || typeof from === "function") {
    for (let key of __getOwnPropNames(from))
      if (!__hasOwnProp.call(to, key) && key !== except)
        __defProp(to, key, {
          get: () => from[key],
          enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable
        });
  }
  return to;
};
// Mark the namespace as an ES module and expose it via module.exports.
var __toCommonJS = (mod) =>
  __copyProps(__defProp({}, "__esModule", { value: true }), mod);

// Public exports of this module.
var evaluation_exports = {};
__export(evaluation_exports, {
  checksEvaluators: () => checksEvaluators
});
module.exports = __toCommonJS(evaluation_exports);

var import_genkit = require("genkit");
var import_tracing = require("genkit/tracing");
var import_metrics = require("./metrics");

/**
 * Builds a Checks guardrails evaluator for the given safety metrics.
 *
 * @param {object} ai - Genkit instance used to define the evaluator.
 * @param {object} auth - Google auth provider; must expose `getClient()`.
 * @param {Array} metrics - Metric names or `{ type, threshold }` configs
 *   (distinguished via `isConfig` from ./metrics).
 * @param {string} projectId - GCP project billed for the Checks API calls.
 * @returns the evaluator registered by `ai.defineEvaluator`.
 */
function checksEvaluators(ai, auth, metrics, projectId) {
  // Normalize each metric into a uniform { type, threshold } policy config;
  // bare metric names get an undefined threshold (API default applies).
  const policy_configs = metrics.map((metric) => {
    const metricType = (0, import_metrics.isConfig)(metric) ? metric.type : metric;
    const threshold = (0, import_metrics.isConfig)(metric) ? metric.threshold : void 0;
    return { type: metricType, threshold };
  });
  return createPolicyEvaluator(projectId, auth, ai, policy_configs);
}

// Expected shape of the Checks classifyContent API response.
const ResponseSchema = import_genkit.z.object({
  policyResults: import_genkit.z.array(
    import_genkit.z.object({
      policyType: import_genkit.z.string(),
      score: import_genkit.z.number(),
      violationResult: import_genkit.z.string()
    })
  )
});

/**
 * Defines the "checks/guardrails" evaluator that classifies each datapoint's
 * output against the configured Checks policies.
 *
 * @param {string} projectId - GCP project for the `x-goog-user-project` header.
 * @param {object} auth - Google auth provider.
 * @param {object} ai - Genkit instance.
 * @param {Array<{type: string, threshold?: number}>} policy_config - Policies to evaluate.
 */
function createPolicyEvaluator(projectId, auth, ai, policy_config) {
  return ai.defineEvaluator(
    {
      name: "checks/guardrails",
      displayName: "checks/guardrails",
      // FIX: join policy types with ", " — the original interpolated the
      // mapped Array directly, relying on implicit Array#toString and
      // producing an unspaced comma list (e.g. "DANGEROUS_CONTENT,HARASSMENT").
      definition: `Evaluates input text against the Checks ${policy_config.map((policy) => policy.type).join(", ")} policies.`
    },
    async (datapoint) => {
      // NOTE(review): the evaluator classifies `datapoint.output` (the model
      // response), not `datapoint.input`.
      const partialRequest = {
        input: { text_input: { content: datapoint.output } },
        policies: policy_config.map((config) => {
          return { policy_type: config.type, threshold: config.threshold };
        })
      };
      const response = await checksEvalInstance(
        ai,
        projectId,
        auth,
        partialRequest,
        ResponseSchema
      );
      // One score entry per policy result returned by the API.
      const evaluationResults = response.policyResults.map((result) => {
        return {
          id: result.policyType,
          score: result.score,
          details: { reasoning: `Status ${result.violationResult}` }
        };
      });
      return { evaluation: evaluationResults, testCaseId: datapoint.testCaseId };
    }
  );
}

/**
 * Calls the Checks `aisafety:classifyContent` endpoint inside a tracing span
 * and validates the response against `responseSchema`.
 *
 * @throws {Error} when the API response does not match the schema.
 */
async function checksEvalInstance(ai, projectId, auth, partialRequest, responseSchema) {
  return await (0, import_tracing.runInNewSpan)(
    ai,
    { metadata: { name: "EvaluationService#evaluateInstances" } },
    async (metadata, _otSpan) => {
      const request = { ...partialRequest };
      // Record request/response on the span metadata for trace inspection.
      metadata.input = request;
      const client = await auth.getClient();
      const url = "https://checks.googleapis.com/v1alpha/aisafety:classifyContent";
      const response = await client.request({
        url,
        method: "POST",
        body: JSON.stringify(request),
        headers: {
          "x-goog-user-project": projectId,
          "Content-Type": "application/json"
        }
      });
      metadata.output = response.data;
      try {
        return responseSchema.parse(response.data);
      } catch (e) {
        throw new Error(`Error parsing ${url} API response: ${e}`);
      }
    }
  );
}

// Annotate the CommonJS export names for ESM import in node:
0 && (module.exports = { checksEvaluators });
//# sourceMappingURL=evaluation.js.map