// alwaysai — The alwaysAI command-line interface (CLI)
// (compiled JavaScript output; scraped page metadata — "318 lines • 14.7 kB, JavaScript" — converted to this comment header)
;
Object.defineProperty(exports, "__esModule", { value: true });
exports.modelConfigure = void 0;
const logSymbols = require("log-symbols");
const alwayscli_1 = require("@alwaysai/alwayscli");
const util_1 = require("../../util");
const model_configuration_schemas_1 = require("@alwaysai/model-configuration-schemas");
const model_package_json_file_1 = require("../../core/model/model-package-json-file");
const infrastructure_1 = require("../../infrastructure");
const ModelDeviceValues = Object.values((0, util_1.keyMirror)(model_configuration_schemas_1.tensorrtDeviceEnum));
const ModelArchitectureValues = Object.values((0, util_1.keyMirror)(Object.assign(Object.assign(Object.assign({}, model_configuration_schemas_1.tensorrtArchitectureObjectDetectionEnum), model_configuration_schemas_1.hailoArchitectureEnum), model_configuration_schemas_1.onnxArchitectureObjectDetectionEnum)));
exports.modelConfigure = (0, alwayscli_1.CliLeaf)({
name: 'configure',
description: 'Generate or modify the model configuration of the application.',
namedInputs: {
name: (0, alwayscli_1.CliStringInput)({
description: 'The model name to be used to generate the model ID',
required: true
}),
// Required fields for all frameworks
framework: (0, alwayscli_1.CliOneOfInput)({
description: '',
required: true,
values: model_configuration_schemas_1.modelFrameworkValues
}),
model_file: (0, alwayscli_1.CliStringInput)({
description: 'Path to model binary file.'
}),
mean: (0, alwayscli_1.CliNumberArrayInput)({
description: 'The average pixel intensity in the red, green, and blue channels of the training dataset.'
}),
scalefactor: (0, alwayscli_1.CliNumberInput)({
description: 'Factor to scale pixel intensities by.'
}),
size: (0, alwayscli_1.CliNumberArrayInput)({
description: 'The input size of the neural network.'
}),
purpose: (0, alwayscli_1.CliOneOfInput)({
description: 'Computer vision purpose of the model.',
required: true,
values: model_configuration_schemas_1.modelPurposeValues
}),
crop: (0, alwayscli_1.CliFlagInput)({
description: 'Crop before resize?'
}),
// Optional fields
config_file: (0, alwayscli_1.CliStringInput)({
description: 'Path to model structure.'
}),
label_file: (0, alwayscli_1.CliStringInput)({
description: 'File containing labels for each class index.'
}),
colors_file: (0, alwayscli_1.CliStringInput)({
description: 'File containing colors to be used by each class index.'
}),
swaprb: (0, alwayscli_1.CliFlagInput)({
description: 'Swap red and blue channels after image blob generation'
}),
softmax: (0, alwayscli_1.CliFlagInput)({
description: 'Apply softmax to the output of the neural network? Boolean true/false'
}),
batch_size: (0, alwayscli_1.CliNumberInput)({
description: 'The inference batch size of the model'
}),
output_layer_names: (0, alwayscli_1.CliStringArrayInput)({
description: 'List of output layers provided in advance',
placeholder: '<>'
}),
device: (0, alwayscli_1.CliOneOfInput)({
description: 'Define the device model is intended to be used on.',
values: ModelDeviceValues
}),
architecture: (0, alwayscli_1.CliOneOfInput)({
description: 'Define the architecture type intended to be used.',
values: ModelArchitectureValues
}),
quantize_input: (0, alwayscli_1.CliFlagInput)({
description: 'Quantize input? Boolean true/false.'
}),
quantize_output: (0, alwayscli_1.CliFlagInput)({
description: 'Quantize output? Boolean true/false.'
}),
input_format: (0, alwayscli_1.CliOneOfInput)({
description: 'Define the input format of the data.',
values: model_configuration_schemas_1.hailoFormatValues
}),
output_format: (0, alwayscli_1.CliOneOfInput)({
description: 'Define the output format of the data.',
values: model_configuration_schemas_1.hailoFormatValues
})
},
async action(_, opts) {
const { name, framework, model_file, config_file, mean, scalefactor, size, purpose, crop, label_file, colors_file, swaprb, softmax, batch_size, output_layer_names, device, architecture, quantize_input, quantize_output, input_format, output_format } = opts;
let modelParameters;
switch (framework) {
case 'tensorflow': {
modelParameters = {
framework_type: 'tensorflow',
model_file: model_file || '',
label_file: label_file || '',
mean: mean || [0, 0, 0],
scalefactor: scalefactor || 1,
size: size || [300, 300],
purpose,
crop,
config_file: config_file || '',
colors_file: colors_file || '',
swaprb,
softmax
};
break;
}
case 'caffe': {
modelParameters = {
framework_type: 'caffe',
config_file: config_file || '',
size: size || [300, 300],
model_file: model_file || '',
label_file: label_file || '',
scalefactor: scalefactor || 1,
mean: mean || [0, 0, 0],
crop,
swaprb,
softmax,
purpose,
output_layer_names: output_layer_names || ['', '']
};
break;
}
case 'enet': {
modelParameters = {
framework_type: 'enet',
size: size || [300, 300],
model_file: model_file || '',
label_file: label_file || '',
colors_file: colors_file || '',
scalefactor: scalefactor || 1,
mean: mean || [0, 0, 0],
crop,
swaprb,
purpose
};
break;
}
case 'darknet': {
modelParameters = {
framework_type: 'darknet',
config_file: config_file || '',
size: size || [300, 300],
model_file: model_file || '',
label_file: label_file || '',
colors_file: colors_file || '',
scalefactor: scalefactor || 1,
mean: mean || [0, 0, 0],
crop,
swaprb,
purpose,
output_layer_names: output_layer_names || null
};
break;
}
case 'onnx': {
if (purpose === 'ObjectDetection') {
if (architecture &&
!(architecture in model_configuration_schemas_1.onnxArchitectureObjectDetectionEnum)) {
throw new alwayscli_1.CliUsageError(`Architecture not supported! (${model_configuration_schemas_1.onnxArchitectureObjectDetectionValues})`);
}
modelParameters = {
framework_type: 'onnx',
size: size || [300, 300],
model_file: model_file || '',
label_file: label_file || '',
colors_file: colors_file || '',
scalefactor: scalefactor || 1,
crop,
swaprb,
purpose,
mean: mean || [0, 0, 0],
output_layer_names: output_layer_names || null,
architecture: architecture
};
}
else {
// Purpose other than ObjectDetection
if (architecture) {
throw new alwayscli_1.CliUsageError(`Parameter --architecture not supported for purpose ${purpose}`);
}
modelParameters = {
framework_type: 'onnx',
size: size || [300, 300],
model_file: model_file || '',
label_file: label_file || '',
colors_file: colors_file || '',
scalefactor: scalefactor || 1,
crop,
swaprb,
purpose,
mean: mean || [0, 0, 0],
output_layer_names: output_layer_names || null
};
}
if (batch_size) {
modelParameters.batch_size = batch_size;
}
break;
}
case 'tensor-rt': {
if (!batch_size) {
throw new alwayscli_1.CliUsageError(`Parameter --batch_size required!`);
}
if (device && !(device in model_configuration_schemas_1.tensorrtDeviceEnum)) {
throw new alwayscli_1.CliUsageError(`Device not supported! (${model_configuration_schemas_1.tensorrtDeviceValues})`);
}
if (purpose === 'ObjectDetection') {
if (architecture &&
!(architecture in model_configuration_schemas_1.tensorrtArchitectureObjectDetectionEnum)) {
throw new alwayscli_1.CliUsageError(`Architecture not supported! (${model_configuration_schemas_1.tensorrtArchitectureObjectDetectionValues})`);
}
modelParameters = {
framework_type: 'tensor-rt',
size: size || [300, 300],
model_file: model_file || '',
label_file: label_file || '',
scalefactor: scalefactor || 1,
mean: mean || [0, 0, 0],
crop,
swaprb,
purpose,
batch_size,
colors_file: colors_file || '',
device: device,
architecture: architecture
};
}
else {
// Purpose other than ObjectDetection
if (architecture) {
throw new alwayscli_1.CliUsageError(`Parameter --architecture not supported for purpose ${purpose}`);
}
modelParameters = {
framework_type: 'tensor-rt',
size: size || [300, 300],
model_file: model_file || '',
label_file: label_file || '',
scalefactor: scalefactor || 1,
mean: mean || [0, 0, 0],
crop,
swaprb,
purpose,
batch_size,
colors_file: colors_file || '',
device: device
};
}
break;
}
case 'hailo': {
if (!architecture || !(architecture in model_configuration_schemas_1.hailoArchitectureEnum)) {
throw new alwayscli_1.CliUsageError(`Parameter --architecture required! (${model_configuration_schemas_1.hailoArchitectureValues})`);
}
if (!purpose || !(purpose in model_configuration_schemas_1.hailoPurposeEnum)) {
throw new alwayscli_1.CliUsageError(`Parameter --purpose required! (${model_configuration_schemas_1.hailoPurposeValues})`);
}
modelParameters = {
framework_type: 'hailo',
architecture: architecture,
quantize_input: quantize_input || true,
quantize_output: quantize_output || true,
input_format: input_format || 'auto',
output_format: output_format || 'auto',
size: size || [300, 300],
model_file: model_file || '',
label_file: label_file || '',
purpose: purpose,
crop,
swaprb,
mean: mean || [0, 0, 0],
scalefactor: scalefactor || 1
};
break;
}
default: {
throw new Error('Unsupported framework.');
}
}
const { username } = await (0, infrastructure_1.CliAuthenticationClient)().getInfo();
const newModel = {
accuracy: '',
dataset: '',
description: '',
id: `${username}/${name}`,
inference_time: null,
license: '',
mean_average_precision_top_1: null,
mean_average_precision_top_5: null,
public: false,
website_url: '',
model_parameters: modelParameters
};
(0, model_configuration_schemas_1.validateModel)(newModel);
if (model_configuration_schemas_1.validateModel.errors) {
(0, util_1.echo)(JSON.stringify(model_configuration_schemas_1.validateModel.errors, _, 2));
throw new alwayscli_1.CliTerseError('Model package contents are invalid!');
}
const message = `Write alwaysai.model.json file`;
const modelPkg = (0, model_package_json_file_1.ModelPackageJsonFile)(process.cwd());
try {
modelPkg.write(newModel);
(0, util_1.echo)(`${logSymbols.success} ${message}`);
}
catch (exception) {
(0, util_1.echo)(`${logSymbols.error} ${message}`);
util_1.logger.error(exception);
throw new alwayscli_1.CliTerseError(`Failed to write model package! ${exception}`);
}
}
});
//# sourceMappingURL=configure.js.map