onnxruntime-react-native
ONNX Runtime bridge for React Native
"use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.onnxruntimeBackend = void 0;
var _onnxruntimeCommon = require("onnxruntime-common");
var _reactNative = require("react-native");
var _binding = require("./binding");
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
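// Maps an ONNX tensor element type to the matching TypedArray constructor.
// Note that 'bool' tensors are represented as Int8Array (one byte per element).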
const tensorTypeToTypedArray = type => {
switch (type) {
case 'float32':
return Float32Array;
case 'int8':
return Int8Array;
case 'uint8':
return Uint8Array;
case 'int16':
return Int16Array;
case 'int32':
return Int32Array;
case 'bool':
return Int8Array;
case 'float64':
return Float64Array;
case 'int64':
/* global BigInt64Array */
/* eslint no-undef: ["error", { "typeof": true }] */
return BigInt64Array;
default:
throw new Error(`unsupported type: ${type}`);
}
};
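// Normalizes a model path for the native loader, e.g. on iOS
// 'file:///var/mobile/model.ort' becomes '/var/mobile/model.ort'.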
const normalizePath = path => {
// remove 'file://' prefix in iOS
if (_reactNative.Platform.OS === 'ios' && path.toLowerCase().startsWith('file://')) {
return path.substring(7);
}
return path;
};
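// Session handler bridging the onnxruntime-common session-handler interface
// to the native module. Each loaded model is identified by a key returned
// from the native side when the session is created.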
class OnnxruntimeSessionHandler {
#inferenceSession;
#key;
#pathOrBuffer;
get inputMetadata() {
    throw new Error('Getting model metadata is currently not implemented for the react-native backend.');
}
get outputMetadata() {
    throw new Error('Getting model metadata is currently not implemented for the react-native backend.');
}
constructor(pathOrBuffer) {
this.#inferenceSession = _binding.binding;
this.#pathOrBuffer = pathOrBuffer;
this.#key = '';
this.inputNames = [];
this.outputNames = [];
}
async loadModel(options) {
try {
let results;
// load a model
if (typeof this.#pathOrBuffer === 'string') {
// load model from model path
results = await this.#inferenceSession.loadModel(normalizePath(this.#pathOrBuffer), options);
} else {
// load model from buffer
if (!this.#inferenceSession.loadModelFromBlob) {
throw new Error('Native module method "loadModelFromBlob" is not defined');
}
const modelBlob = _binding.jsiHelper.storeArrayBuffer(this.#pathOrBuffer.buffer);
results = await this.#inferenceSession.loadModelFromBlob(modelBlob, options);
}
// resolve promise if onnxruntime session is successfully created
this.#key = results.key;
this.inputNames = results.inputNames;
this.outputNames = results.outputNames;
} catch (e) {
throw new Error(`Can't load a model: ${e.message}`);
}
}
async dispose() {
return this.#inferenceSession.dispose(this.#key);
}
startProfiling() {
// TODO: implement profiling
}
endProfiling() {
// TODO: implement profiling
}
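  // Runs inference for the given feeds. Outputs must be requested by name
  // only; preallocated output tensors are rejected below.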
async run(feeds, fetches, options) {
const outputNames = [];
for (const name in fetches) {
if (Object.prototype.hasOwnProperty.call(fetches, name)) {
if (fetches[name]) {
          throw new Error('Preallocated output is not supported; pass output names as a string array instead');
}
outputNames.push(name);
}
}
const input = this.encodeFeedsType(feeds);
const results = await this.#inferenceSession.run(this.#key, input, outputNames, options);
const output = this.decodeReturnType(results);
return output;
}
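  // Serializes input tensors for the native call: plain-array data passes
  // through unchanged, while TypedArray data is handed to the JSI helper,
  // which returns a handle the native side can resolve to the underlying
  // ArrayBuffer.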
encodeFeedsType(feeds) {
const returnValue = {};
for (const key in feeds) {
      if (Object.prototype.hasOwnProperty.call(feeds, key)) {
let data;
if (Array.isArray(feeds[key].data)) {
data = feeds[key].data;
} else {
const buffer = feeds[key].data.buffer;
data = _binding.jsiHelper.storeArrayBuffer(buffer);
}
returnValue[key] = {
dims: feeds[key].dims,
type: feeds[key].type,
data
};
}
}
return returnValue;
}
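  // Deserializes native results back into onnxruntime-common Tensors:
  // ArrayBuffer handles are resolved through the JSI helper and wrapped in
  // the TypedArray matching each tensor's element type.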
decodeReturnType(results) {
const returnValue = {};
for (const key in results) {
      if (Object.prototype.hasOwnProperty.call(results, key)) {
let tensorData;
if (Array.isArray(results[key].data)) {
tensorData = results[key].data;
} else {
const buffer = _binding.jsiHelper.resolveArrayBuffer(results[key].data);
const typedArray = tensorTypeToTypedArray(results[key].type);
tensorData = new typedArray(buffer, buffer.byteOffset, buffer.byteLength / typedArray.BYTES_PER_ELEMENT);
}
returnValue[key] = new _onnxruntimeCommon.Tensor(results[key].type, tensorData, results[key].dims);
}
}
return returnValue;
}
}
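// Backend implementation registered with onnxruntime-common; it creates one
// OnnxruntimeSessionHandler per inference session.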
class OnnxruntimeBackend {
async init() {
return Promise.resolve();
}
async createInferenceSessionHandler(pathOrBuffer, options) {
const handler = new OnnxruntimeSessionHandler(pathOrBuffer);
await handler.loadModel(options || {});
return handler;
}
}
const onnxruntimeBackend = exports.onnxruntimeBackend = new OnnxruntimeBackend();
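// Illustrative usage (a sketch, not part of this module): applications
// consume this backend through the standard onnxruntime-common API
// re-exported by 'onnxruntime-react-native'. The model path and the input
// and output names ('input', 'output') below are hypothetical.
//
//   import { InferenceSession, Tensor } from 'onnxruntime-react-native';
//
//   const session = await InferenceSession.create('/path/to/model.ort');
//   const feeds = { input: new Tensor('float32', new Float32Array([1, 2, 3, 4]), [1, 4]) };
//   const results = await session.run(feeds);
//   console.log(results.output.data);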
//# sourceMappingURL=backend.js.map