@tensorflow/tfjs-layers
TensorFlow layers API in JavaScript
/**
* @license
* Copyright 2018 Google LLC
*
* Use of this source code is governed by an MIT-style
* license that can be found in the LICENSE file or at
* https://opensource.org/licenses/MIT.
* =============================================================================
*/
/// <amd-module name="@tensorflow/tfjs-layers/dist/backend/tfjs_backend" />
/**
* deeplearn.js backend.
*/
import * as tfc from '@tensorflow/tfjs-core';
import { Tensor, Tensor1D } from '@tensorflow/tfjs-core';
import { DataFormat, Shape } from '../keras_format/common';
import { HasShape } from '../types';
export declare function setBackend(requestedBackend: 'cpu' | 'webgl'): void;
export declare function getBackend(): 'cpu' | 'webgl';
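// Usage sketch (illustrative, not part of the declarations): switching the
// backend that tfjs-layers runs on. It assumes this internal module can be
// imported via the dist path named in the amd-module directive above, and
// that `tfc` is `@tensorflow/tfjs-core`:
//
//   import * as tfc from '@tensorflow/tfjs-core';
//   import {setBackend, getBackend} from
//       '@tensorflow/tfjs-layers/dist/backend/tfjs_backend';
//
//   setBackend('cpu');
//   console.log(getBackend());  // 'cpu'
//
// The sketches after the other declarations below assume the same kind of
// imports and are likewise illustrative.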
/**
* Indicates whether the backend is operating symbolically.
*
* This function will be used to determine how to interpret user code. If
* it returns true, calls to the backend construct a symbolic graph; if
* it returns false, calls to the backend execute immediately.
*/
export declare function isBackendSymbolic(): boolean;
/**
* Get the number of elements in a Tensor.
* @param x The Tensor.
* @return Number of elements in `x`.
*/
export declare function countParams(x: HasShape): number;
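// Illustrative sketch: the count is the product of the dimensions in the
// tensor's shape.
//
//   const w = tfc.zeros([3, 4]);
//   countParams(w);  // 12 (= 3 * 4)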
/**
* Casts a tensor to a different dtype and returns it.
* @param x Input tensor.
* @param dtype String: 'float32'|'int32'|'bool'.
* @returns Tensor of the specified `dtype`.
*/
export declare function cast(x: Tensor, dtype: tfc.DataType): Tensor;
/**
 * Adds a 1-sized dimension at index `axis`.
 * @param x Input tensor.
 * @param axis Position at which to add the new axis.
* @returns Result of the dimension expansion.
*/
export declare function expandDims(x: Tensor, axis?: number): Tensor;
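// Illustrative sketch: inserting a size-1 axis at position 1.
//
//   const x = tfc.tensor2d([[1, 2, 3], [4, 5, 6]]);  // shape [2, 3]
//   expandDims(x, 1);                                // shape [2, 1, 3]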
/**
* Repeats a 2D tensor.
*
* If `x` has shape `[samples, dim]` and `n` is 2, for example, the output
* will have shape `[samples, 2, dim]`.
*
* @param x Input tensor.
* @param n Integer, number of times to repeat.
* @returns The result of the repeat operation.
* @throws ValueError: If input tensor is not 2D.
*/
export declare function repeat(x: Tensor, n: number): Tensor;
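// Illustrative sketch: repeating a 2D tensor along a new middle axis.
//
//   const x = tfc.tensor2d([[1, 2, 3], [4, 5, 6]]);  // shape [2, 3]
//   repeat(x, 2);                                    // shape [2, 2, 3]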
/**
* Flatten a Tensor into 1D.
* @param x Input tensor.
 * @return The result of flattening `x`.
*/
export declare function flatten(x: Tensor): Tensor;
/**
 * Turn an nD tensor into a 2D tensor with the same 0th dimension.
 * In other words, it flattens each data sample of a batch.
*
* @param x The tensor to flatten. The rank of this tensor is required to be 2
* or higher.
* @return The result of the flattening.
*/
export declare function batchFlatten(x: Tensor): Tensor;
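// Illustrative sketch: flattening all but the batch (0th) dimension.
//
//   const x = tfc.zeros([4, 2, 3]);  // shape [4, 2, 3]
//   batchFlatten(x);                 // shape [4, 6]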
/**
* Do slicing along the first axis.
* @param array input `tf.Tensor`.
* @param start starting index, inclusive.
* @param size size of the slice along the first axis.
* @returns result of the slicing.
* @throws ValueError: If `array` is of an unsupported subtype of `tf.Tensor`.
*/
export declare function sliceAlongFirstAxis(array: Tensor, start: number, size: number): Tensor;
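// Illustrative sketch: taking rows 1 and 2 of a 2D tensor.
//
//   const x = tfc.tensor2d([[0, 0], [1, 1], [2, 2], [3, 3]]);  // shape [4, 2]
//   sliceAlongFirstAxis(x, 1, 2);  // [[1, 1], [2, 2]], shape [2, 2]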
/**
* Do slicing along the last axis.
* @param array input `tf.Tensor`.
* @param start starting index, inclusive.
* @param size size of the slice along the last axis.
* @returns result of the slicing.
* @throws ValueError: If `array` is of an unsupported subtype of `tf.Tensor`.
*/
export declare function sliceAlongLastAxis(array: Tensor, start: number, size: number): Tensor;
/**
 * Do slicing along the specified axis.
* @param array input `tf.Tensor`.
* @param start starting index, inclusive.
 * @param size Size of the slice along the chosen axis.
 * @param axis The axis along which to slice.
* @returns result of the slicing.
* @throws ValueError: If `array` is of an unsupported subtype of `tf.Tensor`.
*/
export declare function sliceAlongAxis(array: Tensor, start: number, size: number, axis: number): Tensor;
/**
* Concatenates a list of tensors alongside the specified axis.
* @param tensors `Array` of tensors to concatenate.
* @param axis Concatenation axis.
* @returns The result of the concatenation.
*/
export declare function concatenate(tensors: Tensor[], axis?: number): Tensor;
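// Illustrative sketch: concatenating two tensors along the last axis.
//
//   const a = tfc.ones([2, 3]);
//   const b = tfc.zeros([2, 3]);
//   concatenate([a, b], 1);  // shape [2, 6]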
/**
* Concatenate two arrays along the first dimension.
* @param a The 1st `tf.Tensor` to concatenate.
* @param b The 2nd `tf.Tensor` to concatenate.
* @returns Result of the concatenation.
* @throws ValueError: If `a` is of an unsupported subtype of `tf.Tensor`.
*/
export declare function concatAlongFirstAxis(a: Tensor, b: Tensor): Tensor;
/**
* Creates a tensor by tiling `x` by `n`.
* @param x A tensor.
* @param n An Array of integers or a single integer. If an Array, the length
* must be the same as the number of dimensions in `x`. If a single integer,
* it will be treated as an Array of length 1.
 * @returns The tiled tensor.
 */
export declare function tile(x: Tensor, n: number | number[]): Tensor;
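// Illustrative sketch: tiling a 2D tensor twice along its last dimension.
//
//   const x = tfc.tensor2d([[1, 2], [3, 4]]);  // shape [2, 2]
//   tile(x, [1, 2]);  // [[1, 2, 1, 2], [3, 4, 3, 4]], shape [2, 4]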
/**
* Get a tensor with normal distribution of values.
*
* @param shape Shape of the tensor.
* @param mean mean value of the normal distribution.
* @param stddev standard deviation of the normal distribution.
 * @param dtype Data type of the tensor: `'float32'` or `'int32'`.
 * @param seed Optional random seed, for reproducible values.
* @return The normal tensor.
*/
export declare function randomNormal(shape: Shape, mean?: number, stddev?: number, dtype?: 'float32' | 'int32', seed?: number): Tensor;
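// Illustrative sketch: a seeded float32 tensor of normally distributed values.
//
//   randomNormal([2, 3], 0, 1, 'float32', 42);  // shape [2, 3]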
/**
 * Multiplies two tensors and returns the result as a tensor.
 *
 * For 2D tensors, this is equivalent to matrix multiplication (matMul).
 * For tensors of higher rank, it follows the Theano behavior
 * (e.g., `(2, 3) * (4, 3, 5) -> (2, 4, 5)`). From the Theano documentation:
 *
 * For N dimensions it is a sum product over the last axis of `x` and the
 * second-to-last axis of `y`.
*
 * @param a A tensor of at least rank 2.
 * @param b A tensor of at least rank 2.
 * @param activation (optional) A string identifying the activation
 *   function.
 * @param bias (optional) A bias tensor to add to the result.
 * @return Result of the dot operation.
*/
export declare function dot(a: Tensor, b: Tensor, activation?: tfc.fused.Activation, bias?: Tensor): Tensor;
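// Illustrative sketch: for 2D inputs, `dot` behaves like matMul.
//
//   const a = tfc.ones([2, 3]);
//   const b = tfc.ones([3, 4]);
//   dot(a, b);  // shape [2, 4]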
/**
* Compute the sign Tensor of an input Tensor.
*
* Elements of the input `tf.Tensor` that are === 0 are mapped to 0.
* Elements of the input `tf.Tensor` that are > 0 are mapped to 1.
* Elements of the input `tf.Tensor` that are < 0 are mapped to -1.
*
* @param x Input `tf.Tensor`.
* @return The sign `tf.Tensor`.
*/
export declare function sign(x: Tensor): Tensor;
/**
* Computes the one-hot representation of an integer tensor.
* @param indices nD integer tensor of shape
* `(batch_size, dim1, dim2, ... dim(n-1))`
* @param numClasses Integer, number of classes to consider.
* @returns (n + 1)D one hot representation of the input
* with shape `(batch_size, dim1, dim2, ... dim(n-1), num_classes)`
*/
export declare function oneHot(indices: Tensor, numClasses: number): Tensor;
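// Illustrative sketch: one-hot encoding a 1D integer tensor.
//
//   const labels = tfc.tensor1d([0, 2, 1], 'int32');
//   oneHot(labels, 3);  // [[1, 0, 0], [0, 0, 1], [0, 1, 0]], shape [3, 3]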
/**
* Retrieves the elements of indices `indices` in the tensor `reference`.
* @param reference A tensor.
* @param indices An integer tensor of indices or an `Array` of integers.
* @param axis Axis along which to perform the gather operation.
* @returns The result of the gathering as a tensor.
*/
export declare function gather(reference: Tensor, indices: number[] | Tensor1D, axis?: number): Tensor;
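// Illustrative sketch: gathering rows 2 and 0 along axis 0.
//
//   const x = tfc.tensor2d([[10, 11], [20, 21], [30, 31]]);  // shape [3, 2]
//   gather(x, [2, 0], 0);  // [[30, 31], [10, 11]], shape [2, 2]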
/**
* Element-wise square.
* @param x Input tensor.
* @return element-wise x^2
*/
export declare function square(x: Tensor): Tensor;
/**
* Element-wise exponentiation.
*
* Porting Note: In PyKeras, `a` (the exponent) is a Python integer, which
 * takes advantage of the backend's (e.g., TensorFlow's) automatic
* conversion to tensor. Here we allow `a` to be either a number or a tensor.
*
* @param x The base tensor.
* @param a The exponent, tensor or number. If a number, it is rounded to the
* nearest integer and converted to a tensor.
* @returns A tensor of the same shape as `x`.
*/
export declare function pow(x: Tensor, a: Tensor | number): Tensor;
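// Illustrative sketch: element-wise squaring via `pow`.
//
//   const x = tfc.tensor1d([1, 2, 3]);
//   pow(x, 2);  // [1, 4, 9]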
/**
* Add a bias to a tensor.
*
* @param x The tensor to add the bias to.
* @param bias The bias to add to `x`. Must be 1D or the same rank as `x`.
* @return Result of the bias adding.
* @throws ValueError: If the rank of `bias` is incorrect.
*/
export declare function biasAdd(x: Tensor, bias: Tensor, dataFormat?: DataFormat): Tensor;
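// Illustrative sketch: adding a 1D bias along the last (channel) dimension.
//
//   const x = tfc.zeros([2, 3]);
//   const bias = tfc.tensor1d([1, 2, 3]);
//   biasAdd(x, bias);  // [[1, 2, 3], [1, 2, 3]], shape [2, 3]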
/**
* Exponential linear unit (ELU).
* @param x A tensor or variable to compute the activation function for.
 * @param alpha A scalar, the scaling factor for the negative section.
* @return Output of the ELU operation.
*/
export declare function elu(x: Tensor, alpha?: number): Tensor;
/**
* Softsign of a tensor.
*
* Defined as x / (abs(x) + 1), element-wise.
*
 * @param x Input tensor.
* @returns Output.
*/
export declare function softsign(x: Tensor): Tensor;
/**
* Sets entries in `x` to zero at random, while scaling the entire tensor.
*
* @param x input tensor.
* @param level fraction of the entries in the tensor that will be set to 0.
* @param noiseShape shape of randomly generated keep/drop flags, must be
* broadcastable to the shape of `x`. Optional.
* @param seed random seed to ensure determinism. Optional.
* @returns Result of the dropout operation.
*/
export declare function dropout(x: Tensor, level: number, noiseShape?: number[], seed?: number): Tensor;
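// Illustrative sketch: dropping roughly half of the entries; following the
// usual inverted-dropout convention, kept entries are scaled by
// 1 / (1 - level) so the expected sum is preserved.
//
//   const x = tfc.ones([2, 4]);
//   dropout(x, 0.5, undefined, 1337);  // kept entries ~2, dropped entries 0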
/**
* Element-wise, segment-wise linear approximation of sigmoid.
*
* Returns `0.` if `x < -2.5`, `1.` if `x > 2.5`.
* In `-2.5 <= x <= 2.5`, returns `0.2 * x + 0.5`.
*
* @param x Input tensor.
* @returns Output tensor.
*/
export declare function hardSigmoid(x: Tensor): Tensor;
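// Illustrative sketch: one value from each segment of the piecewise
// approximation.
//
//   const x = tfc.tensor1d([-3, 0, 3]);
//   hardSigmoid(x);  // [0, 0.5, 1]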
/**
* Invoke `x` in the training phase, and `alt` otherwise.
*
* Porting Note: We do not create placeholder tensors for the `training`
* boolean flag here, because there is no such thing in the TF.js imperative
* backend.
*
* @param x The function to invoke iff `training` is `true`.
* @param alt The function to invoke iff `training` is `false`.
* @param training Boolean flag for whether training phase is active.
* @returns The return value of `x()` if `training` is `true`, or the return
* value of `alt()` if `training` is `false`.
*/
export declare function inTrainPhase<T>(x: () => T, alt: () => T, training?: boolean): T;
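// Illustrative sketch: choosing between a training-time and an inference-time
// computation (e.g., applying dropout only during training).
//
//   const x = tfc.ones([2, 4]);
//   inTrainPhase(() => dropout(x, 0.5), () => x, true);   // dropout applied
//   inTrainPhase(() => dropout(x, 0.5), () => x, false);  // returns x as-is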