@genkit-ai/checks
Version:
Google Checks AI Safety plugins for classifying the safety of text against Checks AI safety policies.
56 lines • 1.43 kB
JavaScript
import { z } from "genkit";
import { isConfig } from "./metrics";
const GUARDRAILS_URL = "https://checks.googleapis.com/v1alpha/aisafety:classifyContent";
// Shape of a single policy verdict in the classifyContent response.
const PolicyResultSchema = z.object({
  policyType: z.string(),
  score: z.number().optional(),
  violationResult: z.string()
});
// Full classifyContent API response: one result per requested policy.
const ResponseSchema = z.object({
  policyResults: z.array(PolicyResultSchema)
});
/**
 * Client for the Google Checks Guardrails API, which classifies text
 * against Checks AI safety policies.
 */
class Guardrails {
  // Auth provider exposing `getClient()` (google-auth style) — TODO confirm exact type.
  auth;
  // GCP project id sent via the `x-goog-user-project` header.
  projectId;
  constructor(auth, projectId) {
    this.auth = auth;
    this.projectId = projectId;
  }
  /**
   * Classify `content` against the given safety policies.
   *
   * @param content - The text to classify.
   * @param policies - Array of policy identifiers, or config objects with
   *   `type` and optional `threshold` (distinguished via `isConfig`).
   * @returns The parsed API response: `{ policyResults: [...] }`.
   * @throws {Error} If the API response does not match `ResponseSchema`;
   *   the original parse error is attached as `cause`.
   */
  async classifyContent(content, policies) {
    // snake_case keys are the API wire format — do not rename.
    const body = {
      input: {
        text_input: {
          content
        }
      },
      policies: policies.map((policy) => {
        const policyType = isConfig(policy) ? policy.type : policy;
        const threshold = isConfig(policy) ? policy.threshold : void 0;
        return {
          policy_type: policyType,
          threshold
        };
      })
    };
    const client = await this.auth.getClient();
    const response = await client.request({
      url: GUARDRAILS_URL,
      method: "POST",
      body: JSON.stringify(body),
      headers: {
        "x-goog-user-project": this.projectId,
        "Content-Type": "application/json"
      }
    });
    try {
      return ResponseSchema.parse(response.data);
    } catch (e) {
      // Preserve the underlying validation error (stack, Zod issue list)
      // instead of flattening it into the message string only.
      throw new Error(`Error parsing ${GUARDRAILS_URL} API response: ${e}`, {
        cause: e
      });
    }
  }
}
export {
Guardrails
};
//# sourceMappingURL=guardrails.mjs.map