/*
 * alwaysai — the alwaysAI command-line interface (CLI)
 * (compiled JavaScript output; original listing: 160 lines, 6.39 kB)
 */
;
Object.defineProperty(exports, "__esModule", { value: true });
exports.modelConvert = exports.OUTPUT_FORMAT_OPTIONS = exports.OutputFormat = void 0;
const alwayscli_1 = require("@alwaysai/alwayscli");
const logSymbols = require("log-symbols");
const path_1 = require("path");
const tempy = require("tempy");
const user_1 = require("../../components/user");
const app_1 = require("../../core/app");
const model_1 = require("../../core/model");
const project_1 = require("../../core/project");
const infrastructure_1 = require("../../infrastructure");
const util_1 = require("../../util");
// Supported conversion output formats, key-mirrored so each key maps to
// itself (e.g. OutputFormat['tensor-rt'] === 'tensor-rt').
const formats = (0, util_1.keyMirror)({
    'tensor-rt': null
    // hailo: null,
});
exports.OutputFormat = formats;
// Flat list of the format names, used for CLI validation and error messages.
exports.OUTPUT_FORMAT_OPTIONS = Object.values(formats);
exports.modelConvert = (0, alwayscli_1.CliLeaf)({
name: 'convert',
description: 'Convert an alwaysAI model to a different format',
positionalInput: (0, alwayscli_1.CliStringInput)({
placeholder: 'model ID e.g. alwaysai/mobilenet_ssd',
required: true
}),
namedInputs: {
format: (0, alwayscli_1.CliOneOfInput)({
description: 'The output format of the model conversion',
required: true,
values: exports.OUTPUT_FORMAT_OPTIONS
}),
'output-id': (0, alwayscli_1.CliStringInput)({
description: 'Model ID of the converted model',
required: false
}),
version: (0, alwayscli_1.CliNumberInput)({
description: 'Version of the model to convert',
required: false
}),
'batch-size': (0, alwayscli_1.CliNumberInput)({
description: 'Batch size if converting to tensor-rt',
required: false
})
},
async action(modelId, { format, 'output-id': outputId, version, 'batch-size': batchSize }) {
await convertModel({ modelId, format, outputId, version, batchSize });
}
});
/**
 * Validates a conversion request, ensures the requested model package is
 * present in the local cache (downloading it if needed), then dispatches to
 * the format-specific converter.
 *
 * @param opts.modelId   ID of the model to convert, e.g. "alwaysai/mobilenet_ssd"
 * @param opts.format    Target format; must be one of OUTPUT_FORMAT_OPTIONS
 * @param opts.outputId  Optional model ID for the converted output
 * @param opts.version   Optional model version; defaults to the version
 *                       reported by the RPC service when omitted
 * @param opts.batchSize Optional batch size (tensor-rt only)
 * @throws CliTerseError on invalid arguments, missing model, or unknown format
 */
async function convertModel(opts) {
    const { modelId, format, outputId, batchSize } = opts;
    await (0, user_1.checkUserIsLoggedInComponent)({ yes: true });
    await (0, project_1.requirePaidPlan)();
    // Fail fast: validate the cheap argument constraints BEFORE the network
    // lookup and (potentially large) model download, so a user with a bad
    // flag is told immediately instead of after waiting on a download.
    if (batchSize && format !== 'tensor-rt') {
        throw new alwayscli_1.CliTerseError(`Batch size parameter is only supported for tensor-rt conversion. Please remove the batch size parameter.`);
    }
    if (outputId) {
        // Throws if outputId is not a syntactically valid model ID.
        model_1.ModelId.parse(outputId);
    }
    let version = opts.version;
    try {
        const modelDetails = await (0, infrastructure_1.CliRpcClient)().getModelVersion({ id: modelId });
        if (version === undefined) {
            // No explicit version requested: use the one the service reports.
            version = modelDetails.version;
        }
        if (!model_1.modelPackageCache.has(modelId, version)) {
            await (0, model_1.downloadModelPackageToCache)(modelId, version);
        }
    }
    catch (err) {
        util_1.logger.error(`Model ${modelId}: ${version} not found: ${(0, util_1.stringifyError)(err)}`);
        throw new alwayscli_1.CliTerseError(`Model ${modelId}: ${version} not found!`);
    }
    switch (format) {
        case 'tensor-rt':
            await convertTensorrt({ modelId, version, outputId, batchSize });
            break;
        // case 'hailo':
        //     await convertHailo(modelId, outputId); //disable hailo conversion
        //     break;
        default:
            // Defensive: CliOneOfInput should already reject unknown formats.
            throw new alwayscli_1.CliTerseError(`${format} is not a valid format. Choose one from the following: <${exports.OUTPUT_FORMAT_OPTIONS}>`);
    }
}
// async function convertHailo(modelId: string, outputId: string) {
// echo('Starting conversion to Hailo...');
// const hailoDockerImage = 'tonyjesudoss/hailomodelconvertor'; // will update with new image once released.
// await JsSpawner().runForeground({
// exe: 'docker',
// args: [
// 'run',
// '--rm',
// '-it',
// '--env',
// `MODEL_ID=${modelId}`,
// '--env',
// `OUTPUT_MODEL_ID=${outputId}`,
// hailoDockerImage,
// ],
// });
// }
/**
 * Runs the TensorRT conversion for a cached model inside a Docker container.
 * Stages the model package in a temporary directory, mounts it (plus ./out
 * in the current working directory) into the conversion image, and reports
 * where the converted model was written.
 *
 * @param props.modelId   ID of the model to convert
 * @param props.version   Model version to install into the staging directory
 * @param props.outputId  Optional model ID for the converted output
 * @param props.batchSize Optional batch size forwarded to the converter
 * @throws CliTerseError when not on Jetson hardware or when conversion fails
 */
async function convertTensorrt(props) {
    (0, util_1.echo)('Starting conversion to TensorRT...');
    const { modelId, version, outputId, batchSize } = props;
    // TODO: enable conversion on remote device
    const targetHardware = await (0, app_1.getTargetHardwareType)({});
    if (!targetHardware.includes('jetson')) {
        throw new alwayscli_1.CliTerseError('NVIDIA Jetson required to convert to TensorRT');
    }
    // Stage the model package in a throwaway working directory.
    const workDir = tempy.directory();
    const workSpawner = (0, util_1.JsSpawner)({ path: workDir });
    await workSpawner.mkdirp();
    // Assemble the command that runs inside the container.
    // NOTE(review): modelId/outputId are interpolated into a `bash -c` string;
    // presumably both are validated model IDs upstream — confirm they can
    // never contain shell metacharacters.
    const command = ['python3', 'app.py', '--model-id', modelId];
    if (outputId) {
        command.push('--output-id', outputId);
    }
    if (batchSize) {
        command.push('--batch-size', batchSize);
    }
    try {
        await (0, app_1.appInstallModel)(workSpawner, modelId, version);
        const dockerImageId = `alwaysai/model-conversion:tensorrt-${targetHardware}`;
        await (0, util_1.JsSpawner)().runForeground({
            exe: 'docker',
            args: ['pull', dockerImageId]
        });
        const exitCode = await (0, util_1.DockerSpawner)({
            dockerImageId,
            targetHardware,
            volumes: [
                `${(0, path_1.join)(workDir, 'models')}:/convert/models`,
                `${(0, path_1.join)(process.cwd(), 'out')}:/convert/out`
            ]
        }).runForeground({
            cwd: '/convert',
            exe: 'bash',
            args: ['-c', command.join(' ')],
            superuser: true
        });
        // runForeground yields undefined on success; anything else is failure.
        if (exitCode !== undefined) {
            util_1.logger.error(`Model conversion failed with error code ${exitCode}`);
            throw new Error(`Model conversion failed with error code ${exitCode}`);
        }
        if (outputId) {
            const parsed = model_1.ModelId.parse(outputId);
            (0, util_1.echo)(`${logSymbols.success} Converted model saved to ./out/${parsed.publisher}/${parsed.name}`);
        }
        else {
            (0, util_1.echo)(`${logSymbols.success} Converted model saved to ./out/`);
        }
    }
    catch (err) {
        util_1.logger.error((0, util_1.stringifyError)(err));
        throw new alwayscli_1.CliTerseError('Model conversion failed! See errors in above logs.');
    }
    finally {
        // Always remove the staging directory, even on failure.
        await workSpawner.rimraf();
    }
}
//# sourceMappingURL=convert.js.map