inference-server
Version:
Libraries and server to build AI applications. Adapters to various native bindings allowing local inference. Integrate it with your application, or use as a microservice.
23 lines • 895 B
JavaScript
import { builtInEngineNames } from '../engines/index.js';
// Allowed characters for a model id: letters, digits, underscore, hyphen, dot.
const modelIdPattern = /^[a-zA-Z0-9_\-\.]+$/;
/**
 * Ensures a model id only contains safe filename-like characters.
 * @param {string} id - The model identifier to check.
 * @throws {Error} When the id contains characters outside the allowed pattern.
 */
export function validateModelId(id) {
    if (modelIdPattern.test(id)) {
        return;
    }
    throw new Error(`Model "${id}" has invalid name; requires ${modelIdPattern}`);
}
/**
 * Validates a model's configuration object.
 * @param {string} id - Model identifier; must match the allowed id pattern.
 * @param {object} modelOptions - Model configuration to validate.
 * @throws {Error} If the id is invalid, no engine is set, a built-in engine
 *   has no model source (file, url, or location), or no task is set.
 */
export function validateModelOptions(id, modelOptions) {
    validateModelId(id);
    if (!modelOptions.engine) {
        throw new Error(`Model "${id}" must have an engine`);
    }
    // A model source may be provided as a local file, a download url, or a location.
    // (Truthiness check suffices; a missing key reads as undefined.)
    const isSourceMissing = !modelOptions.file &&
        !modelOptions.url &&
        !modelOptions.location;
    // Only built-in engines need an explicit source; custom engines may
    // resolve their models internally.
    if (builtInEngineNames.includes(modelOptions.engine) && isSourceMissing) {
        // Message now lists all three accepted source fields, matching the check above.
        throw new Error(`Model "${id}" must have either file, url or location`);
    }
    if (!modelOptions.task) {
        throw new Error(`Model "${id}" must have a task`);
    }
}
//# sourceMappingURL=validateModelOptions.js.map