UNPKG

@tensorflow/tfjs-node

Version:

This repository provides native TensorFlow execution in backend JavaScript applications under the Node.js runtime, accelerated by the TensorFlow C binary under the hood. It provides the same API as [TensorFlow.js](https://js.tensorflow.org/api/latest/).

496 lines (495 loc) 24.4 kB
"use strict"; /** * @license * Copyright 2019 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) { function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); } return new (P || (P = Promise))(function (resolve, reject) { function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } } function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } } function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); } step((generator = generator.apply(thisArg, _arguments || [])).next()); }); }; var __generator = (this && this.__generator) || function (thisArg, body) { var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g; return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g; function verb(n) { return function (v) { return step([n, v]); }; } function step(op) { if (f) throw new TypeError("Generator is already executing."); while (g && (g = 0, op[0] && (_ = 0)), _) try { if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? 
y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t; if (y = 0, t) op = [op[0] & 2, t.value]; switch (op[0]) { case 0: case 1: t = op; break; case 4: _.label++; return { value: op[1], done: false }; case 5: _.label++; y = op[1]; op = [0]; continue; case 7: op = _.ops.pop(); _.trys.pop(); continue; default: if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; } if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; } if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; } if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; } if (t[2]) _.ops.pop(); _.trys.pop(); continue; } op = body.call(thisArg, _); } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; } if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true }; } }; Object.defineProperty(exports, "__esModule", { value: true }); exports.getNumOfSavedModels = exports.loadSavedModel = exports.TFSavedModel = exports.getSignatureDefEntryFromMetaGraphInfo = exports.getMetaGraphsFromSavedModel = exports.readSavedModelProto = exports.getEnumKeyFromValue = void 0; var tfjs_1 = require("@tensorflow/tfjs"); var fs = require("fs"); var util_1 = require("util"); var nodejs_kernel_backend_1 = require("./nodejs_kernel_backend"); var readFile = (0, util_1.promisify)(fs.readFile); // tslint:disable-next-line:no-require-imports var messages = require('./proto/api_pb'); var SAVED_MODEL_FILE_NAME = '/saved_model.pb'; var SAVED_MODEL_INIT_OP_KEY = '__saved_model_init_op'; // This map is used to keep track of loaded SavedModel metagraph mapping // information. The map key is TFSavedModel id in JavaScript, value is // an object of path to the SavedModel, metagraph tags, and loaded Session ID in // the c++ bindings. 
When user loads a SavedModel signature, it will go through // entries in this map to find if the corresponding SavedModel session has // already been loaded in C++ addon and will reuse it if existing. var loadedSavedModelPathMap = new Map(); // The ID of loaded TFSavedModel. This ID is used to keep track of loaded // TFSavedModel, so the loaded session in c++ bindings for the corresponding // TFSavedModel can be properly reused/disposed. var nextTFSavedModelId = 0; /** * Get a key in an object by its value. This is used to get protobuf enum value * from index. * * @param object * @param value */ // tslint:disable-next-line:no-any function getEnumKeyFromValue(object, value) { return Object.keys(object).find(function (key) { return object[key] === value; }); } exports.getEnumKeyFromValue = getEnumKeyFromValue; /** * Read SavedModel proto message from path. * * @param path Path to SavedModel folder. */ function readSavedModelProto(path) { return __awaiter(this, void 0, void 0, function () { var modelFile, array; return __generator(this, function (_a) { switch (_a.label) { case 0: // Load the SavedModel pb file and deserialize it into message. try { fs.accessSync(path + SAVED_MODEL_FILE_NAME, fs.constants.R_OK); } catch (error) { throw new Error('There is no saved_model.pb file in the directory: ' + path); } return [4 /*yield*/, readFile(path + SAVED_MODEL_FILE_NAME)]; case 1: modelFile = _a.sent(); array = new Uint8Array(modelFile); return [2 /*return*/, messages.SavedModel.deserializeBinary(array)]; } }); }); } exports.readSavedModelProto = readSavedModelProto; /** * Inspect the MetaGraphs of the SavedModel from the provided path. This * function will return an array of `MetaGraphInfo` objects. * * @param path Path to SavedModel folder. 
*
 * @doc {heading: 'Models', subheading: 'SavedModel', namespace: 'node'}
 */
function getMetaGraphsFromSavedModel(path) {
    return __awaiter(this, void 0, void 0, function () {
        var result, modelMessage, metaGraphList, i, metaGraph, tags, signatureDef, signatureDefMap, signatureDefKeys, key, signatureDefEntry, inputsMapMessage, inputsMapKeys, inputs, inputsMapKey, inputTensor, inputTensorInfo, dtype, outputsMapMessage, outputsMapKeys, outputs, outputsMapKey, outputTensor, outputTensorInfo, dtype;
        return __generator(this, function (_a) {
            switch (_a.label) {
                case 0:
                    result = [];
                    return [4 /*yield*/, readSavedModelProto(path)];
                case 1:
                    modelMessage = _a.sent();
                    metaGraphList = modelMessage.getMetaGraphsList();
                    for (i = 0; i < metaGraphList.length; i++) {
                        metaGraph = {};
                        tags = metaGraphList[i].getMetaInfoDef().getTagsList();
                        metaGraph.tags = tags;
                        signatureDef = {};
                        signatureDefMap = metaGraphList[i].getSignatureDefMap();
                        // Proto map keys are walked with the proto map iterator API
                        // (next()/done), not with JS for...of.
                        signatureDefKeys = signatureDefMap.keys();
                        // Go through all signatureDefs
                        while (true) {
                            key = signatureDefKeys.next();
                            if (key.done) {
                                break;
                            }
                            // Skip TensorFlow internal Signature '__saved_model_init_op'.
                            if (key.value === SAVED_MODEL_INIT_OP_KEY) {
                                continue;
                            }
                            signatureDefEntry = signatureDefMap.get(key.value);
                            // Convert each input TensorInfo proto into a plain JS
                            // descriptor (dtype, tfDtype, name, shape).
                            inputsMapMessage = signatureDefEntry.getInputsMap();
                            inputsMapKeys = inputsMapMessage.keys();
                            inputs = {};
                            while (true) {
                                inputsMapKey = inputsMapKeys.next();
                                if (inputsMapKey.done) {
                                    break;
                                }
                                inputTensor = inputsMapMessage.get(inputsMapKey.value);
                                inputTensorInfo = {};
                                dtype = getEnumKeyFromValue(messages.DataType, inputTensor.getDtype());
                                inputTensorInfo.dtype = mapTFDtypeToJSDtype(dtype);
                                inputTensorInfo.tfDtype = dtype;
                                inputTensorInfo.name = inputTensor.getName();
                                inputTensorInfo.shape = inputTensor.getTensorShape().getDimList();
                                inputs[inputsMapKey.value] = inputTensorInfo;
                            }
                            // Same conversion for the output TensorInfo protos.
                            outputsMapMessage = signatureDefEntry.getOutputsMap();
                            outputsMapKeys = outputsMapMessage.keys();
                            outputs = {};
                            while (true) {
                                outputsMapKey = outputsMapKeys.next();
                                if (outputsMapKey.done) {
                                    break;
                                }
                                outputTensor = outputsMapMessage.get(outputsMapKey.value);
                                outputTensorInfo = {};
                                dtype = getEnumKeyFromValue(messages.DataType, outputTensor.getDtype());
                                outputTensorInfo.dtype = mapTFDtypeToJSDtype(dtype);
                                outputTensorInfo.tfDtype = dtype;
                                outputTensorInfo.name = outputTensor.getName();
                                outputTensorInfo.shape = outputTensor.getTensorShape().getDimList();
                                outputs[outputsMapKey.value] = outputTensorInfo;
                            }
                            signatureDef[key.value] = { inputs: inputs, outputs: outputs };
                        }
                        metaGraph.signatureDefs = signatureDef;
                        result.push(metaGraph);
                    }
                    return [2 /*return*/, result];
            }
        });
    });
}
exports.getMetaGraphsFromSavedModel = getMetaGraphsFromSavedModel;
/**
 * Get SignatureDefEntry from SavedModel metagraphs info. The SignatureDefEntry
 * will be used when executing a SavedModel signature.
 *
 * @param savedModelInfo The MetaGraphInfo array loaded through
 *     getMetaGraphsFromSavedModel().
 * @param tags The tags of the MetaGraph to get input/output node names from.
 * @param signature The signature to get input/output node names from.
*/ function getSignatureDefEntryFromMetaGraphInfo(savedModelInfo, tags, signature) { for (var i = 0; i < savedModelInfo.length; i++) { var metaGraphInfo = savedModelInfo[i]; if (stringArraysHaveSameElements(tags, metaGraphInfo.tags)) { if (metaGraphInfo.signatureDefs[signature] == null) { throw new Error('The SavedModel does not have signature: ' + signature); } return metaGraphInfo.signatureDefs[signature]; } } throw new Error("The SavedModel does not have tags: ".concat(tags)); } exports.getSignatureDefEntryFromMetaGraphInfo = getSignatureDefEntryFromMetaGraphInfo; /** * A `tf.TFSavedModel` is a signature loaded from a SavedModel * metagraph, and allows inference execution. * * @doc {heading: 'Models', subheading: 'SavedModel', namespace: 'node'} */ var TFSavedModel = /** @class */ (function () { function TFSavedModel(sessionId, jsid, signature, backend) { this.sessionId = sessionId; this.jsid = jsid; this.signature = signature; this.backend = backend; this.disposed = false; } Object.defineProperty(TFSavedModel.prototype, "inputs", { /** * Return the array of input tensor info. * * @doc {heading: 'Models', subheading: 'SavedModel'} */ get: function () { var entries = this.signature.inputs; var results = Object.keys(entries).map(function (key) { return entries[key]; }); results.forEach(function (info) { info.name = info.name.replace(/:0$/, ''); }); return results; }, enumerable: false, configurable: true }); Object.defineProperty(TFSavedModel.prototype, "outputs", { /** * Return the array of output tensor info. 
* * @doc {heading: 'Models', subheading: 'SavedModel'} */ get: function () { var entries = this.signature.outputs; var results = Object.keys(entries).map(function (key) { return entries[key]; }); results.forEach(function (info) { info.name = info.name.replace(/:0$/, ''); }); return results; }, enumerable: false, configurable: true }); /** * Delete the SavedModel from nodeBackend and delete corresponding session in * the C++ backend if the session is only used by this TFSavedModel. * * @doc {heading: 'Models', subheading: 'SavedModel'} */ TFSavedModel.prototype.dispose = function () { if (!this.disposed) { this.disposed = true; loadedSavedModelPathMap.delete(this.jsid); for (var _i = 0, _a = Array.from(loadedSavedModelPathMap.keys()); _i < _a.length; _i++) { var id = _a[_i]; var value = loadedSavedModelPathMap.get(id); if (value.sessionId === this.sessionId) { return; } } this.backend.deleteSavedModel(this.sessionId); } else { throw new Error('This SavedModel has already been deleted.'); } }; Object.defineProperty(TFSavedModel.prototype, "outputNodeNames", { get: function () { var _this = this; if (this.outputNodeNames_ != null) { return this.outputNodeNames_; } this.outputNodeNames_ = Object.keys(this.signature.outputs) .reduce(function (names, key) { names[key] = _this.signature.outputs[key].name; return names; }, {}); return this.outputNodeNames_; }, enumerable: false, configurable: true }); /** * Execute the inference for the input tensors. * * @param input The input tensors, when there is single input for the model, * inputs param should be a Tensor. For models with multiple inputs, inputs * params should be in either Tensor[] if the input order is fixed, or * otherwise NamedTensorMap format. The keys in the NamedTensorMap are the * name of input tensors in SavedModel signatureDef. It can be found through * `tf.node.getMetaGraphsFromSavedModel()`. * * For batch inference execution, the tensors for each input need to be * concatenated together. 
For example with mobilenet, the required input shape * is [1, 244, 244, 3], which represents the [batch, height, width, channel]. * If we are provide a batched data of 100 images, the input tensor should be * in the shape of [100, 244, 244, 3]. * * @param config Prediction configuration for specifying the batch size. * * @returns Inference result tensors. The output would be single Tensor if * model has single output node, otherwise Tensor[] or NamedTensorMap[] will * be returned for model with multiple outputs. * * @doc {heading: 'Models', subheading: 'SavedModel'} */ TFSavedModel.prototype.predict = function (inputs, config) { var _this = this; if (this.disposed) { throw new Error('The TFSavedModel has already been deleted!'); } else { var inputTensors = []; if (inputs instanceof tfjs_1.Tensor) { inputTensors.push(inputs); var result = this.backend.runSavedModel(this.sessionId, inputTensors, Object.values(this.signature.inputs), Object.values(this.outputNodeNames)); return result.length > 1 ? 
result : result[0]; } else if (Array.isArray(inputs)) { inputTensors = inputs; return this.backend.runSavedModel(this.sessionId, inputTensors, Object.values(this.signature.inputs), Object.values(this.outputNodeNames)); } else { var inputTensorNames = Object.keys(this.signature.inputs); var providedInputNames = Object.keys(inputs); if (!stringArraysHaveSameElements(inputTensorNames, providedInputNames)) { throw new Error("The model signatureDef input names are ".concat(inputTensorNames.join(), ", however the provided input names are ").concat(providedInputNames.join(), ".")); } var inputNodeNamesArray = []; for (var i = 0; i < inputTensorNames.length; i++) { inputTensors.push(inputs[inputTensorNames[i]]); inputNodeNamesArray.push(this.signature.inputs[inputTensorNames[i]]); } var outputTensorNames = Object.keys(this.outputNodeNames); var outputNodeNamesArray = []; for (var i = 0; i < outputTensorNames.length; i++) { outputNodeNamesArray.push(this.outputNodeNames[outputTensorNames[i]]); } var outputTensors_1 = this.backend.runSavedModel(this.sessionId, inputTensors, inputNodeNamesArray, outputNodeNamesArray); tfjs_1.util.assert(outputTensors_1.length === outputNodeNamesArray.length, function () { return 'Output tensors do not match output node names, ' + "receive ".concat(outputTensors_1.length, ") output tensors but ") + "there are ".concat(_this.outputNodeNames.length, " output nodes."); }); var outputMap = {}; for (var i = 0; i < outputTensorNames.length; i++) { outputMap[outputTensorNames[i]] = outputTensors_1[i]; } return outputMap; } } }; /** * Execute the inference for the input tensors and return activation * values for specified output node names without batching. * * @param input The input tensors, when there is single input for the model, * inputs param should be a Tensor. For models with multiple inputs, inputs * params should be in either Tensor[] if the input order is fixed, or * otherwise NamedTensorMap format. * * @param outputs string|string[]. 
List of output node names to retrieve * activation from. * * @returns Activation values for the output nodes result tensors. The return * type matches specified parameter outputs type. The output would be single * Tensor if single output is specified, otherwise Tensor[] for multiple * outputs. * * @doc {heading: 'Models', subheading: 'SavedModel'} */ TFSavedModel.prototype.execute = function (inputs, outputs) { throw new Error('execute() of TFSavedModel is not supported yet.'); }; return TFSavedModel; }()); exports.TFSavedModel = TFSavedModel; /** * Load a TensorFlow SavedModel from disk. TensorFlow SavedModel is different * from TensorFlow.js model format. A SavedModel is a directory containing * serialized signatures and the states needed to run them. The directory has a * saved_model.pb (or saved_model.pbtxt) file storing the actual TensorFlow * program, or model, and a set of named signatures, each identifying a * function. The directory also has a variables directory contains a standard * training checkpoint. The directory may also has a assets directory contains * files used by the TensorFlow graph, for example text files used to initialize * vocabulary tables. These are supported datatypes: float32, int32, complex64, * string.For more information, see this guide: * https://www.tensorflow.org/guide/saved_model. * * @param path The path to the SavedModel. * @param tags The tags of the MetaGraph to load. The available tags of a * SavedModel can be retrieved through tf.node.getMetaGraphsFromSavedModel() * API. Defaults to ['serve']. * @param signature The name of the SignatureDef to load. The available * SignatureDefs of a SavedModel can be retrieved through * tf.node.getMetaGraphsFromSavedModel() API. Defaults to 'serving_default'. 
* * @doc {heading: 'Models', subheading: 'SavedModel', namespace: 'node'} */ function loadSavedModel(path, tags, signature) { if (tags === void 0) { tags = ['serve']; } if (signature === void 0) { signature = 'serving_default'; } return __awaiter(this, void 0, void 0, function () { var backend, savedModelInfo, signatureDefEntry, sessionId, _i, _a, id_1, modelInfo, tagsString, id, savedModel; return __generator(this, function (_b) { switch (_b.label) { case 0: (0, nodejs_kernel_backend_1.ensureTensorflowBackend)(); backend = (0, nodejs_kernel_backend_1.nodeBackend)(); return [4 /*yield*/, getMetaGraphsFromSavedModel(path)]; case 1: savedModelInfo = _b.sent(); signatureDefEntry = getSignatureDefEntryFromMetaGraphInfo(savedModelInfo, tags, signature); for (_i = 0, _a = Array.from(loadedSavedModelPathMap.keys()); _i < _a.length; _i++) { id_1 = _a[_i]; modelInfo = loadedSavedModelPathMap.get(id_1); if (modelInfo.path === path && stringArraysHaveSameElements(modelInfo.tags, tags)) { sessionId = modelInfo.sessionId; } } if (sessionId == null) { tagsString = tags.join(','); sessionId = backend.loadSavedModelMetaGraph(path, tagsString); } id = nextTFSavedModelId++; savedModel = new TFSavedModel(sessionId, id, signatureDefEntry, backend); loadedSavedModelPathMap.set(id, { path: path, tags: tags, sessionId: sessionId }); return [2 /*return*/, savedModel]; } }); }); } exports.loadSavedModel = loadSavedModel; /** * Compare if two unsorted arrays of string have the same elements. 
* @param arrayA * @param arrayB */ function stringArraysHaveSameElements(arrayA, arrayB) { if (arrayA.length === arrayB.length && arrayA.sort().join() === arrayB.sort().join()) { return true; } return false; } function mapTFDtypeToJSDtype(tfDtype) { switch (tfDtype) { case 'DT_FLOAT': return 'float32'; case 'DT_INT64': case 'DT_INT32': case 'DT_UINT8': return 'int32'; case 'DT_BOOL': return 'bool'; case 'DT_COMPLEX64': return 'complex64'; case 'DT_STRING': return 'string'; default: throw new Error('Unsupported tensor DataType: ' + tfDtype + ', try to modify the model in python to convert the datatype'); } } function getNumOfSavedModels() { (0, nodejs_kernel_backend_1.ensureTensorflowBackend)(); var backend = (0, nodejs_kernel_backend_1.nodeBackend)(); return backend.getNumOfSavedModels(); } exports.getNumOfSavedModels = getNumOfSavedModels;