@tensorflow/tfjs-node
This package provides native TensorFlow execution for backend JavaScript applications under the Node.js runtime, accelerated by the TensorFlow C binary under the hood. It provides the same API as [TensorFlow.js](https://js.tensorflow.org/api/latest/).
/**
* @license
* Copyright 2019 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
import { InferenceModel, MetaGraph, ModelPredictConfig, ModelTensorInfo, NamedTensorMap, SignatureDefEntry, Tensor } from '@tensorflow/tfjs';
import { NodeJSKernelBackend } from './nodejs_kernel_backend';
/**
* Get a key in an object by its value. This is used to look up the name of a
* protobuf enum value from its numeric index.
*
* @param object The object (e.g. a protobuf enum mapping) to search.
* @param value The numeric value whose key should be returned.
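*
* For example (an illustrative sketch; the enum object below is a stand-in
* for a protobuf enum mapping):
*
* ```js
* const DataType = {DT_FLOAT: 1, DT_INT32: 3};
* getEnumKeyFromValue(DataType, 1); // 'DT_FLOAT'
* ```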
*/
export declare function getEnumKeyFromValue(object: any, value: number): string;
/**
* Read the SavedModel proto message from the given path.
*
* @param path Path to SavedModel folder.
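*
* For example (illustrative; assumes a SavedModel exists at './saved_model'):
*
* ```js
* const modelMessage = await readSavedModelProto('./saved_model');
* // modelMessage is the parsed saved_model.pb protobuf message.
* ```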
*/
export declare function readSavedModelProto(path: string): Promise<any>;
/**
* Inspect the MetaGraphs of the SavedModel from the provided path. This
* function returns an array of `MetaGraph` objects.
*
* @param path Path to SavedModel folder.
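*
* For example (illustrative; assumes a SavedModel exists at './saved_model'):
*
* ```js
* const modelInfo = await tf.node.getMetaGraphsFromSavedModel('./saved_model');
* for (const metaGraph of modelInfo) {
*   console.log(metaGraph.tags);                       // e.g. ['serve']
*   console.log(Object.keys(metaGraph.signatureDefs)); // e.g. ['serving_default']
* }
* ```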
*
* @doc {heading: 'Models', subheading: 'SavedModel', namespace: 'node'}
*/
export declare function getMetaGraphsFromSavedModel(path: string): Promise<MetaGraph[]>;
/**
* Get a SignatureDefEntry from SavedModel MetaGraph info. The
* SignatureDefEntry is used when executing a SavedModel signature.
*
* @param savedModelInfo The MetaGraph array loaded through
* getMetaGraphsFromSavedModel().
* @param tags The tags of the MetaGraph to get input/output node names from.
* @param signature The signature to get input/output node names from.
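*
* For example (illustrative; the tags and signature name shown are the usual
* defaults):
*
* ```js
* const modelInfo = await tf.node.getMetaGraphsFromSavedModel('./saved_model');
* const entry = getSignatureDefEntryFromMetaGraphInfo(
*     modelInfo, ['serve'], 'serving_default');
* // entry.inputs and entry.outputs map tensor names to dtype/shape info.
* ```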
*/
export declare function getSignatureDefEntryFromMetaGraphInfo(savedModelInfo: MetaGraph[], tags: string[], signature: string): SignatureDefEntry;
/**
* A `tf.TFSavedModel` is a signature loaded from a SavedModel
* metagraph, and allows inference execution.
*
* @doc {heading: 'Models', subheading: 'SavedModel', namespace: 'node'}
*/
export declare class TFSavedModel implements InferenceModel {
private sessionId;
private jsid;
private signature;
private backend;
private disposed;
private outputNodeNames_;
constructor(sessionId: number, jsid: number, signature: SignatureDefEntry, backend: NodeJSKernelBackend);
/**
* Return the array of input tensor info.
*
* @doc {heading: 'Models', subheading: 'SavedModel'}
*/
get inputs(): ModelTensorInfo[];
/**
* Return the array of output tensor info.
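*
* For example (illustrative):
*
* ```js
* const model = await tf.node.loadSavedModel('./saved_model');
* console.log(model.inputs);  // ModelTensorInfo[] describing signature inputs
* console.log(model.outputs); // ModelTensorInfo[] describing signature outputs
* ```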
*
* @doc {heading: 'Models', subheading: 'SavedModel'}
*/
get outputs(): ModelTensorInfo[];
/**
* Delete the SavedModel from the node backend and delete the corresponding
* session in the C++ backend if the session is only used by this TFSavedModel.
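*
* For example (illustrative):
*
* ```js
* const model = await tf.node.loadSavedModel('./saved_model');
* // ... run inference ...
* model.dispose(); // frees the session if no other TFSavedModel shares it
* ```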
*
* @doc {heading: 'Models', subheading: 'SavedModel'}
*/
dispose(): void;
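/**
* Return the map from the SavedModel signature's output names to the
* corresponding output node names in the graph.
*/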
get outputNodeNames(): {
[key: string]: string;
};
/**
* Execute the inference for the input tensors.
*
* @param inputs The input tensors. When the model has a single input, the
* inputs param should be a Tensor. For models with multiple inputs, the
* inputs param should be a Tensor[] if the input order is fixed, or a
* NamedTensorMap otherwise. The keys in the NamedTensorMap are the names of
* the input tensors in the SavedModel signatureDef; they can be found through
* `tf.node.getMetaGraphsFromSavedModel()`.
*
* For batch inference, the tensors for each input need to be concatenated
* together. For example, MobileNet's required input shape is
* [1, 224, 224, 3], which represents [batch, height, width, channel]. If we
* provide a batch of 100 images, the input tensor should have shape
* [100, 224, 224, 3].
*
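* For example (an illustrative sketch assuming a MobileNet-style SavedModel
* saved at './mobilenet'):
*
* ```js
* const model = await tf.node.loadSavedModel(
*     './mobilenet', ['serve'], 'serving_default');
* // Single input: pass a Tensor directly.
* const result = model.predict(tf.zeros([100, 224, 224, 3]));
* // Multiple named inputs: pass a NamedTensorMap keyed by the signature's
* // input tensor names (the names below are hypothetical).
* // const result = model.predict({'input_1': t1, 'input_2': t2});
* ```
*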
* @param config Prediction configuration for specifying the batch size.
*
* @returns Inference result tensors. The output is a single Tensor if the
* model has a single output node; otherwise a Tensor[] or NamedTensorMap is
* returned for models with multiple outputs.
*
* @doc {heading: 'Models', subheading: 'SavedModel'}
*/
predict(inputs: Tensor | Tensor[] | NamedTensorMap, config?: ModelPredictConfig): Tensor | Tensor[] | NamedTensorMap;
/**
* Execute inference for the input tensors and return activation values for
* the specified output node names, without batching.
*
* @param inputs The input tensors. When the model has a single input, the
* inputs param should be a Tensor. For models with multiple inputs, the
* inputs param should be a Tensor[] if the input order is fixed, or a
* NamedTensorMap otherwise.
*
* @param outputs string|string[]. The list of output node names from which to
* retrieve activations.
*
* @returns Activation values for the output nodes' result tensors. The return
* type matches the type of the outputs parameter: a single Tensor if a single
* output is specified, otherwise Tensor[] for multiple outputs.
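*
* For example (illustrative; the output node name below is hypothetical and
* can be listed via `tf.node.getMetaGraphsFromSavedModel()`):
*
* ```js
* const model = await tf.node.loadSavedModel('./mobilenet');
* const activation = model.execute(
*     tf.zeros([1, 224, 224, 3]), 'MobilenetV2/embedding');
* ```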
*
* @doc {heading: 'Models', subheading: 'SavedModel'}
*/
execute(inputs: Tensor | Tensor[] | NamedTensorMap, outputs: string | string[]): Tensor | Tensor[];
}
/**
* Load a TensorFlow SavedModel from disk. TensorFlow SavedModel is a
* different format from the TensorFlow.js model format. A SavedModel is a
* directory containing serialized signatures and the states needed to run
* them. The directory has a saved_model.pb (or saved_model.pbtxt) file
* storing the actual TensorFlow program, or model, and a set of named
* signatures, each identifying a function. The directory also has a
* variables directory containing a standard training checkpoint. The
* directory may also have an assets directory containing files used by the
* TensorFlow graph, for example text files used to initialize vocabulary
* tables. The supported datatypes are float32, int32, complex64, and string.
* For more information, see this guide:
* https://www.tensorflow.org/guide/saved_model.
*
* @param path The path to the SavedModel.
* @param tags The tags of the MetaGraph to load. The available tags of a
* SavedModel can be retrieved through the
* tf.node.getMetaGraphsFromSavedModel() API. Defaults to ['serve'].
* @param signature The name of the SignatureDef to load. The available
* SignatureDefs of a SavedModel can be retrieved through the
* tf.node.getMetaGraphsFromSavedModel() API. Defaults to 'serving_default'.
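*
* For example (illustrative; the tags and signature shown are the defaults):
*
* ```js
* const tf = require('@tensorflow/tfjs-node');
* const model = await tf.node.loadSavedModel(
*     './saved_model', ['serve'], 'serving_default');
* ```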
*
* @doc {heading: 'Models', subheading: 'SavedModel', namespace: 'node'}
*/
export declare function loadSavedModel(path: string, tags?: string[], signature?: string): Promise<TFSavedModel>;
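/**
* Return the number of SavedModel sessions currently loaded in the backend.
*/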
export declare function getNumOfSavedModels(): number;