@genkit-ai/checks
Version:
Google Checks AI Safety plugins for classifying the safety of text against Checks AI safety policies.
41 lines (39 loc) • 1.65 kB
text/typescript
/**
* Copyright 2024 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Currently supported Checks AI Safety policies.
 *
 * String enum: each member's value is identical to its name, so the
 * serialized form of a metric type is the policy identifier itself
 * (e.g. "HARASSMENT").
 */
declare enum ChecksEvaluationMetricType {
DANGEROUS_CONTENT = "DANGEROUS_CONTENT",
PII_SOLICITING_RECITING = "PII_SOLICITING_RECITING",
HARASSMENT = "HARASSMENT",
SEXUALLY_EXPLICIT = "SEXUALLY_EXPLICIT",
HATE_SPEECH = "HATE_SPEECH",
MEDICAL_INFO = "MEDICAL_INFO",
VIOLENCE_AND_GORE = "VIOLENCE_AND_GORE",
OBSCENITY_AND_PROFANITY = "OBSCENITY_AND_PROFANITY"
}
/**
 * Checks evaluation metric config. Use `threshold` to override the default
 * violation threshold for the given policy `type`.
 *
 * NOTE(review): the original comment referenced a `metricSpec` field being
 * "included in the request to the API", but no such field exists on this
 * declared type — confirm against the plugin implementation and the Checks
 * API documentation.
 */
type ChecksEvaluationMetricConfig = {
/** The Checks AI Safety policy to evaluate against. */
type: ChecksEvaluationMetricType;
/**
 * Optional violation-threshold override for this policy.
 * Presumably a score in [0, 1] — TODO confirm against the Checks API docs.
 */
threshold?: number;
};
/**
 * A metric to evaluate: either a bare policy type (using the default
 * violation threshold) or a full config object that may override the
 * threshold.
 */
type ChecksEvaluationMetric = ChecksEvaluationMetricType | ChecksEvaluationMetricConfig;
/**
 * Type guard that narrows a `ChecksEvaluationMetric` to the object config
 * form (`ChecksEvaluationMetricConfig`), as opposed to a bare
 * `ChecksEvaluationMetricType` string value.
 *
 * @param config The metric to inspect.
 * @returns `true` when `config` is the config-object variant.
 */
declare function isConfig(config: ChecksEvaluationMetric): config is ChecksEvaluationMetricConfig;
export { type ChecksEvaluationMetric, type ChecksEvaluationMetricConfig, ChecksEvaluationMetricType, isConfig };