@genkit-ai/checks
Google Checks AI Safety plugin for classifying text against Checks AI safety policies.
JavaScript
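// Runtime enum of the Checks AI safety policies that can be requested as evaluation metrics.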
var ChecksEvaluationMetricType = /* @__PURE__ */ ((ChecksEvaluationMetricType2) => {
ChecksEvaluationMetricType2["DANGEROUS_CONTENT"] = "DANGEROUS_CONTENT";
ChecksEvaluationMetricType2["PII_SOLICITING_RECITING"] = "PII_SOLICITING_RECITING";
ChecksEvaluationMetricType2["HARASSMENT"] = "HARASSMENT";
ChecksEvaluationMetricType2["SEXUALLY_EXPLICIT"] = "SEXUALLY_EXPLICIT";
ChecksEvaluationMetricType2["HATE_SPEECH"] = "HATE_SPEECH";
ChecksEvaluationMetricType2["MEDICAL_INFO"] = "MEDICAL_INFO";
ChecksEvaluationMetricType2["VIOLENCE_AND_GORE"] = "VIOLENCE_AND_GORE";
ChecksEvaluationMetricType2["OBSCENITY_AND_PROFANITY"] = "OBSCENITY_AND_PROFANITY";
return ChecksEvaluationMetricType2;
})(ChecksEvaluationMetricType || {});
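// Distinguishes a metric config object ({ type, ... }) from a bare ChecksEvaluationMetricType value.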
function isConfig(config) {
return config.type !== void 0;
}
export {
ChecksEvaluationMetricType,
isConfig
};
//# sourceMappingURL=metrics.mjs.map
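The two exports above cover how a metric can be specified: either as a bare ChecksEvaluationMetricType value or as a configuration object whose type field names the policy, with isConfig telling the two forms apart. Below is a minimal sketch of that distinction, assuming both names are re-exported from the package entry point; the threshold field is purely illustrative and not confirmed by metrics.mjs.
JavaScript
import { ChecksEvaluationMetricType, isConfig } from "@genkit-ai/checks";

// Metrics may be given as bare policy values or as config objects.
// `threshold` is an illustrative extra field, not part of metrics.mjs.
const metrics = [
  ChecksEvaluationMetricType.DANGEROUS_CONTENT,
  { type: ChecksEvaluationMetricType.HARASSMENT, threshold: 0.5 },
];

for (const metric of metrics) {
  // isConfig() only checks for a `type` property, so bare string enum
  // values fall through to the second branch.
  const policy = isConfig(metric) ? metric.type : metric;
  console.log(policy, isConfig(metric) ? "(config object)" : "(bare value)");
}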