/**
 * Type declarations for @vladmandic/human
 * Version:
 * Human: AI-powered 3D Face Detection & Rotation Tracking, Face Description & Recognition, Body Pose Tracking, 3D Hand & Finger Tracking, Iris Analysis, Age & Gender & Emotion Prediction, Gesture Recognition
 * Generated TypeScript declaration bundle: 1,465 lines (1,274 loc), 410 kB
 */
/// <reference path="../src/types/webgpu.d.ts" />
/** Kernel name constant for the Abs op. */
export declare const Abs = "Abs";
/** Public op alias; see `abs_` below for the documented contract. */
export declare const abs: typeof abs_;
/**
 * Computes absolute value element-wise: `abs(x)`
 *
 * ```js
 * const x = tf.tensor1d([-1, 2, -3, 4]);
 *
 * x.abs().print(); // or tf.abs(x)
 * ```
 * @param x The input `tf.Tensor`.
 *
 * @doc {heading: 'Operations', subheading: 'Basic math'}
 */
declare function abs_<T extends Tensor>(x: T | TensorLike): T;
/** Named inputs for the Abs kernel (a single unary input tensor). */
export declare type AbsInputs = UnaryInputs;
/** Kernel name constant for the Acos op. */
export declare const Acos = "Acos";
/** Public op alias; see `acos_` below for the documented contract. */
export declare const acos: typeof acos_;
/**
 * Computes acos of the input `tf.Tensor` element-wise: `acos(x)`
 *
 * ```js
 * const x = tf.tensor1d([0, 1, -1, .7]);
 *
 * x.acos().print(); // or tf.acos(x)
 * ```
 * @param x The input tensor.
 * @doc {heading: 'Operations', subheading: 'Basic math'}
 */
declare function acos_<T extends Tensor>(x: T | TensorLike): T;
/** Kernel name constant for the Acosh op. */
export declare const Acosh = "Acosh";
/** Public op alias; see `acosh_` below for the documented contract. */
export declare const acosh: typeof acosh_;
/**
 * Computes the inverse hyperbolic cos of the input `tf.Tensor` element-wise:
 * `acosh(x)`
 *
 * ```js
 * const x = tf.tensor1d([10, 1, 3, 5.7]);
 *
 * x.acosh().print(); // or tf.acosh(x)
 * ```
 * @param x The input tensor.
 *
 * @doc {heading: 'Operations', subheading: 'Basic math'}
 */
declare function acosh_<T extends Tensor>(x: T | TensorLike): T;
/** Named inputs for the Acosh kernel (a single unary input tensor). */
export declare type AcoshInputs = UnaryInputs;
/** Named inputs for the Acos kernel (a single unary input tensor). */
export declare type AcosInputs = UnaryInputs;
/** String identifiers for the activation functions understood by `applyActivation`. */
declare type Activation = 'linear' | 'relu' | 'prelu' | 'elu' | 'relu6' | 'leakyrelu' | 'sigmoid';
/** @doclink Optimizer */
/**
 * Declaration of the Adadelta optimizer. Serializable via
 * `getConfig`/`fromConfig`; keeps private accumulator state
 * (`accumulatedGrads`, `accumulatedUpdates`).
 */
export declare class AdadeltaOptimizer extends Optimizer {
protected learningRate: number;
protected rho: number;
protected epsilon: number;
/** @nocollapse */
static get className(): string;
private accumulatedGrads;
private accumulatedUpdates;
constructor(learningRate: number, rho: number, epsilon?: number);
/** Applies the given gradients to the corresponding variables. */
applyGradients(variableGradients: NamedVariableMap | NamedTensor[]): void;
dispose(): void;
getWeights(): Promise<NamedTensor[]>;
setWeights(weightValues: NamedTensor[]): Promise<void>;
getConfig(): ConfigDict;
/** @nocollapse */
static fromConfig<T extends Serializable>(cls: SerializableConstructor<T>, config: ConfigDict): T;
}
/** @doclink Optimizer */
/**
 * Declaration of the Adagrad optimizer. Serializable via
 * `getConfig`/`fromConfig`; keeps private accumulator state
 * (`accumulatedGrads`).
 */
export declare class AdagradOptimizer extends Optimizer {
protected learningRate: number;
private initialAccumulatorValue;
/** @nocollapse */
static get className(): string;
private accumulatedGrads;
constructor(learningRate: number, initialAccumulatorValue?: number);
/** Applies the given gradients to the corresponding variables. */
applyGradients(variableGradients: NamedVariableMap | NamedTensor[]): void;
dispose(): void;
getWeights(): Promise<NamedTensor[]>;
setWeights(weightValues: NamedTensor[]): Promise<void>;
getConfig(): ConfigDict;
/** @nocollapse */
static fromConfig<T extends Serializable>(cls: SerializableConstructor<T>, config: ConfigDict): T;
}
/**
 * Declaration of the Adamax optimizer. Serializable via
 * `getConfig`/`fromConfig`; keeps private moment/iteration state
 * (`accumulatedFirstMoment`, `accumulatedWeightedInfNorm`).
 */
export declare class AdamaxOptimizer extends Optimizer {
protected learningRate: number;
protected beta1: number;
protected beta2: number;
protected epsilon: number;
protected decay: number;
/** @nocollapse */
static get className(): string;
private accBeta1;
private iteration;
private accumulatedFirstMoment;
private accumulatedWeightedInfNorm;
constructor(learningRate: number, beta1: number, beta2: number, epsilon?: number, decay?: number);
/** Applies the given gradients to the corresponding variables. */
applyGradients(variableGradients: NamedVariableMap | NamedTensor[]): void;
dispose(): void;
getWeights(): Promise<NamedTensor[]>;
setWeights(weightValues: NamedTensor[]): Promise<void>;
getConfig(): ConfigDict;
/** @nocollapse */
static fromConfig<T extends Serializable>(cls: SerializableConstructor<T>, config: ConfigDict): T;
}
/**
 * Declaration of the Adam optimizer. Serializable via
 * `getConfig`/`fromConfig`; keeps private moment state
 * (`accumulatedFirstMoment`, `accumulatedSecondMoment`).
 */
export declare class AdamOptimizer extends Optimizer {
protected learningRate: number;
protected beta1: number;
protected beta2: number;
protected epsilon: number;
/** @nocollapse */
static get className(): string;
private accBeta1;
private accBeta2;
private accumulatedFirstMoment;
private accumulatedSecondMoment;
constructor(learningRate: number, beta1: number, beta2: number, epsilon?: number);
/** Applies the given gradients to the corresponding variables. */
applyGradients(variableGradients: NamedVariableMap | NamedTensor[]): void;
dispose(): void;
getWeights(): Promise<NamedTensor[]>;
setWeights(weightValues: NamedTensor[]): Promise<void>;
getConfig(): ConfigDict;
/** @nocollapse */
static fromConfig<T extends Serializable>(cls: SerializableConstructor<T>, config: ConfigDict): T;
}
/** Kernel name constant for the Add op. */
export declare const Add = "Add";
/** Public op alias; see `add_` below for the documented contract. */
export declare const add: typeof add_;
/**
 * Adds two `tf.Tensor`s element-wise, A + B. Supports broadcasting.
 *
 *
 * ```js
 * const a = tf.tensor1d([1, 2, 3, 4]);
 * const b = tf.tensor1d([10, 20, 30, 40]);
 *
 * a.add(b).print(); // or tf.add(a, b)
 * ```
 *
 * ```js
 * // Broadcast add a with b.
 * const a = tf.scalar(5);
 * const b = tf.tensor1d([10, 20, 30, 40]);
 *
 * a.add(b).print(); // or tf.add(a, b)
 * ```
 * @param a The first `tf.Tensor` to add.
 * @param b The second `tf.Tensor` to add. Must have the same type as `a`.
 *
 * @doc {heading: 'Operations', subheading: 'Arithmetic'}
 */
declare function add_<T extends Tensor>(a: Tensor | TensorLike, b: Tensor | TensorLike): T;
/** Named inputs for the Add kernel (binary inputs `a` and `b`). */
export declare type AddInputs = BinaryInputs;
/** Kernel name constant for the AddN op. */
export declare const AddN = "AddN";
/** Public op alias; see `addN_` below for the documented contract. */
export declare const addN: typeof addN_;
/**
 * Adds a list of `tf.Tensor`s element-wise, each with the same shape and dtype.
 *
 * ```js
 * const a = tf.tensor1d([1, 2]);
 * const b = tf.tensor1d([3, 4]);
 * const c = tf.tensor1d([5, 6]);
 *
 * tf.addN([a, b, c]).print();
 * ```
 * @param tensors A list of tensors with the same shape and dtype.
 * @doc {heading: 'Operations', subheading: 'Arithmetic'}
 */
declare function addN_<T extends Tensor>(tensors: Array<T | TensorLike>): T;
/** Named inputs for the AddN kernel: a variadic list of tensors. */
export declare type AddNInputs = TensorInfo[];
/** Kernel name constant for the All reduction op. */
export declare const All = "All";
/** Public op alias; see `all_` below for the documented contract. */
export declare const all: typeof all_;
/**
 * Computes the logical and of elements across dimensions of a `tf.Tensor`.
 *
 * Reduces the input along the dimensions given in `axes`. Unless `keepDims`
 * is true, the rank of the `tf.Tensor` is reduced by 1 for each entry in
 * `axes`. If `keepDims` is true, the reduced dimensions are retained with
 * length 1. If `axes` has no entries, all dimensions are reduced, and a
 * `tf.Tensor` with a single element is returned.
 *
 * ```js
 * const x = tf.tensor1d([1, 1, 1], 'bool');
 *
 * x.all().print(); // or tf.all(x)
 * ```
 *
 * ```js
 * const x = tf.tensor2d([1, 1, 0, 0], [2, 2], 'bool');
 *
 * const axis = 1;
 * x.all(axis).print(); // or tf.all(x, axis)
 * ```
 *
 * @param x The input tensor. Must be of dtype bool.
 * @param axis The dimension(s) to reduce. By default it reduces
 * all dimensions.
 * @param keepDims If true, retains reduced dimensions with size 1.
 *
 * @doc {heading: 'Operations', subheading: 'Reduction'}
 */
declare function all_<T extends Tensor>(x: Tensor | TensorLike, axis?: number | number[], keepDims?: boolean): T;
/** Attributes accepted by the All kernel. */
export declare interface AllAttrs {
/** Dimension(s) to reduce. */
axis: number | number[];
/** If true, reduced dimensions are retained with length 1. */
keepDims: boolean;
}
/** Named inputs for the All kernel (a single input tensor `x`). */
export declare type AllInputs = Pick<NamedTensorInfoMap, 'x'>;
/** Kernel name constant for the Any reduction op. */
export declare const Any = "Any";
/** Public op alias; see `any_` below for the documented contract. */
export declare const any: typeof any_;
/**
 * Computes the logical or of elements across dimensions of a `tf.Tensor`.
 *
 * Reduces the input along the dimensions given in `axes`. Unless `keepDims`
 * is true, the rank of the `tf.Tensor` is reduced by 1 for each entry in
 * `axes`. If `keepDims` is true, the reduced dimensions are retained with
 * length 1. If `axes` has no entries, all dimensions are reduced, and a
 * `tf.Tensor` with a single element is returned.
 *
 * ```js
 * const x = tf.tensor1d([1, 1, 1], 'bool');
 *
 * x.any().print(); // or tf.any(x)
 * ```
 *
 * ```js
 * const x = tf.tensor2d([1, 1, 0, 0], [2, 2], 'bool');
 *
 * const axis = 1;
 * x.any(axis).print(); // or tf.any(x, axis)
 * ```
 *
 * @param x The input tensor. Must be of dtype bool.
 * @param axis The dimension(s) to reduce. By default it reduces
 * all dimensions.
 * @param keepDims If true, retains reduced dimensions with size 1.
 *
 * @doc {heading: 'Operations', subheading: 'Reduction'}
 */
declare function any_<T extends Tensor>(x: Tensor | TensorLike, axis?: number | number[], keepDims?: boolean): T;
/** Attributes accepted by the Any kernel. */
export declare interface AnyAttrs {
/** Dimension(s) to reduce. */
axis: number | number[];
/** If true, reduced dimensions are retained with length 1. */
keepDims: boolean;
}
/** Named inputs for the Any kernel (a single input tensor `x`). */
export declare type AnyInputs = Pick<NamedTensorInfoMap, 'x'>;
/**
 * Applies the named activation to `x`. `preluActivationWeights` is used for
 * the 'prelu' activation and `leakyreluAlpha` for 'leakyrelu'.
 */
declare function applyActivation(x: Tensor, activation: Activation, preluActivationWeights?: Tensor, leakyreluAlpha?: number): Tensor;
/** Kernel name constant for the ArgMax op. */
export declare const ArgMax = "ArgMax";
/** Public op alias; see `argMax_` below for the documented contract. */
export declare const argMax: typeof argMax_;
/**
 * Returns the indices of the maximum values along an `axis`.
 *
 * The result has the same shape as `input` with the dimension along `axis`
 * removed.
 *
 * ```js
 * const x = tf.tensor1d([1, 2, 3]);
 *
 * x.argMax().print(); // or tf.argMax(x)
 * ```
 *
 * ```js
 * const x = tf.tensor2d([1, 2, 4, 3], [2, 2]);
 *
 * const axis = 1;
 * x.argMax(axis).print(); // or tf.argMax(x, axis)
 * ```
 *
 * @param x The input tensor.
 * @param axis The dimension to reduce. Defaults to 0 (outer-most dimension).
 *
 * @doc {heading: 'Operations', subheading: 'Reduction'}
 */
declare function argMax_<T extends Tensor>(x: Tensor | TensorLike, axis?: number): T;
/** Attributes accepted by the ArgMax kernel. */
export declare interface ArgMaxAttrs {
/** The dimension to reduce. */
axis: number;
}
/** Named inputs for the ArgMax kernel (a single input tensor `x`). */
export declare type ArgMaxInputs = Pick<NamedTensorInfoMap, 'x'>;
/** Kernel name constant for the ArgMin op. */
export declare const ArgMin = "ArgMin";
/** Public op alias; see `argMin_` below for the documented contract. */
export declare const argMin: typeof argMin_;
/**
 * Returns the indices of the minimum values along an `axis`.
 *
 * The result has the same shape as `input` with the dimension along `axis`
 * removed.
 *
 * ```js
 * const x = tf.tensor1d([1, 2, 3]);
 *
 * x.argMin().print(); // or tf.argMin(x)
 * ```
 *
 * ```js
 * const x = tf.tensor2d([1, 2, 4, 3], [2, 2]);
 *
 * const axis = 1;
 * x.argMin(axis).print(); // or tf.argMin(x, axis)
 * ```
 *
 * @param x The input tensor.
 * @param axis The dimension to reduce. Defaults to 0 (outer-most dimension).
 *
 * @doc {heading: 'Operations', subheading: 'Reduction'}
 */
declare function argMin_<T extends Tensor>(x: Tensor | TensorLike, axis?: number): T;
/** Attributes accepted by the ArgMin kernel. */
export declare interface ArgMinAttrs {
/** The dimension to reduce. */
axis: number;
}
/** Named inputs for the ArgMin kernel (a single input tensor `x`). */
export declare type ArgMinInputs = Pick<NamedTensorInfoMap, 'x'>;
/** @docalias number[] */
/** Maps a rank label (R0..R6) to the corresponding nested-number-array type. */
declare interface ArrayMap {
R0: number;
R1: number[];
R2: number[][];
R3: number[][][];
R4: number[][][][];
R5: number[][][][][];
R6: number[][][][][][];
}
/** Returns whether two flat vectors are element-wise equal. */
declare function arraysEqual(n1: FlatVector, n2: FlatVector): boolean;
/** Like `arraysEqual`, for number arrays that may contain null entries. */
declare function arraysEqualWithNull(n1: number[], n2: number[]): boolean;
/** Kernel name constant for the Asin op. */
export declare const Asin = "Asin";
/** Public op alias; see `asin_` below for the documented contract. */
export declare const asin: typeof asin_;
/**
 * Computes asin of the input `tf.Tensor` element-wise: `asin(x)`
 *
 * ```js
 * const x = tf.tensor1d([0, 1, -1, .7]);
 *
 * x.asin().print(); // or tf.asin(x)
 * ```
 * @param x The input tensor.
 * @doc {heading: 'Operations', subheading: 'Basic math'}
 */
declare function asin_<T extends Tensor>(x: T | TensorLike): T;
/** Kernel name constant for the Asinh op. */
export declare const Asinh = "Asinh";
/** Public op alias; see `asinh_` below for the documented contract. */
export declare const asinh: typeof asinh_;
/**
 * Computes inverse hyperbolic sin of the input `tf.Tensor` element-wise:
 * `asinh(x)`
 *
 * ```js
 * const x = tf.tensor1d([0, 1, -1, .7]);
 *
 * x.asinh().print(); // or tf.asinh(x)
 * ```
 * @param x The input tensor.
 *
 * @doc {heading: 'Operations', subheading: 'Basic math'}
 */
declare function asinh_<T extends Tensor>(x: T | TensorLike): T;
/** Named inputs for the Asinh kernel (a single unary input tensor). */
export declare type AsinhInputs = UnaryInputs;
/** Named inputs for the Asin kernel (a single unary input tensor). */
export declare type AsinInputs = UnaryInputs;
/**
 * Asserts that the expression is true. Otherwise throws an error with the
 * provided message.
 *
 * ```js
 * const x = 2;
 * tf.util.assert(x === 2, 'x is not 2');
 * ```
 *
 * @param expr The expression to assert (as a boolean).
 * @param msg A function that returns the message to report when throwing an
 * error. We use a function for performance reasons.
 *
 * @doc {heading: 'Util', namespace: 'util'}
 */
declare function assert(expr: boolean, msg: () => string): void;
/** Asserts the two shapes are broadcast-compatible and returns the broadcast shape. */
declare function assertAndGetBroadcastShape(shapeA: number[], shapeB: number[]): number[];
/** Asserts the given axes are the inner-most dimensions for the given rank. */
declare function assertAxesAreInnerMostDims(msg: string, axes: number[], rank: number): void;
/** Asserts every entry of `shape` is a non-negative integer. */
declare function assertNonNegativeIntegerDimensions(shape: number[]): void;
/** Asserts the given tensor-like value is not null/undefined. */
declare function assertNonNull(a: TensorLike): void;
/**
* @license
* Copyright 2017 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/** Asserts the given shapes are consistent for concatenation along `axis`. */
declare function assertParamsConsistent(shapes: number[][], axis: number): void;
/** Asserts `begin`/`size` describe a valid slice of `input`. */
declare function assertParamsValid(input: TensorInfo, begin: number[], size: number[]): void;
/** Asserts the two shapes match exactly, prefixing any error with `errorMessagePrefix`. */
declare function assertShapesMatch(shapeA: number[], shapeB: number[], errorMessagePrefix?: string): void;
/** Asserts the two tensors have matching dtypes. */
declare function assertTypesMatch(a: Tensor, b: Tensor): void;
/**
 * Insert a given complex value into the TypedArray.
 * @param data The array in which the complex value is inserted.
 * @param real The real part of the complex value to be inserted.
 * @param imag The imaginary part of the complex value to be inserted.
 * @param index An index of the target complex value.
 */
declare function assignToTypedArray(data: TypedArray, real: number, imag: number, index: number): void;
/** Kernel name constant for the Atan op. */
export declare const Atan = "Atan";
/** Public op alias; see `atan_` below for the documented contract. */
export declare const atan: typeof atan_;
/** Kernel name constant for the Atan2 op. */
export declare const Atan2 = "Atan2";
/** Public op alias; see `atan2_` below for the documented contract. */
export declare const atan2: typeof atan2_;
/**
 * Computes arctangent of `tf.Tensor`s a / b element-wise: `atan2(a, b)`.
 * Supports broadcasting.
 *
 * ```js
 * const a = tf.tensor1d([1.0, 1.0, -1.0, .7]);
 * const b = tf.tensor1d([2.0, 13.0, 3.5, .21]);
 *
 * tf.atan2(a, b).print()
 * ```
 *
 * @param a The first tensor.
 * @param b The second tensor. Must have the same dtype as `a`.
 *
 * @doc {heading: 'Operations', subheading: 'Basic math'}
 */
declare function atan2_<T extends Tensor>(a: Tensor | TensorLike, b: Tensor | TensorLike): T;
/** Named inputs for the Atan2 kernel (binary inputs `a` and `b`). */
export declare type Atan2Inputs = BinaryInputs;
/**
 * Computes atan of the input `tf.Tensor` element-wise: `atan(x)`
 *
 * ```js
 * const x = tf.tensor1d([0, 1, -1, .7]);
 *
 * x.atan().print(); // or tf.atan(x)
 * ```
 * @param x The input tensor.
 *
 * @doc {heading: 'Operations', subheading: 'Basic math'}
 */
declare function atan_<T extends Tensor>(x: T | TensorLike): T;
/** Kernel name constant for the Atanh op. */
export declare const Atanh = "Atanh";
/** Public op alias; see `atanh_` below for the documented contract. */
export declare const atanh: typeof atanh_;
/**
 * Computes inverse hyperbolic tan of the input `tf.Tensor` element-wise:
 * `atanh(x)`
 *
 * ```js
 * const x = tf.tensor1d([0, .1, -.1, .7]);
 *
 * x.atanh().print(); // or tf.atanh(x)
 * ```
 * @param x The input tensor.
 *
 * @doc {heading: 'Operations', subheading: 'Basic math'}
 */
declare function atanh_<T extends Tensor>(x: T | TensorLike): T;
/** Named inputs for the Atanh kernel (a single unary input tensor). */
export declare type AtanhInputs = UnaryInputs;
/** Named inputs for the Atan kernel (a single unary input tensor). */
export declare type AtanInputs = UnaryInputs;
/** These are extra non-tensor/primitive params passed to kernel functions. */
export declare type Attribute = AttributeValue | RecursiveArray<AttributeValue>;
/** Primitive (or named-map) value types an `Attribute` may hold. */
declare type AttributeValue = number | number[] | boolean | boolean[] | string | string[] | NamedAttrMap;
/** Kernel name constant for the AvgPool op. */
export declare const AvgPool = "AvgPool";
/** Public op alias; see `avgPool_` below for the documented contract. */
export declare const avgPool: typeof avgPool_;
/** Kernel name constant for the AvgPool3D op. */
export declare const AvgPool3D = "AvgPool3D";
/** Public op alias; see `avgPool3d_` below for the documented contract. */
export declare const avgPool3d: typeof avgPool3d_;
/**
 * Computes the 3D average pooling.
 *
 * ```js
 * const x = tf.tensor5d([1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 2, 2, 1]);
 * const result = tf.avgPool3d(x, 2, 1, 'valid');
 * result.print();
 * ```
 *
 * @param x The input tensor, of rank 5 or rank 4 of shape
 * `[batch, depth, height, width, inChannels]`.
 * @param filterSize The filter size:
 * `[filterDepth, filterHeight, filterWidth]`.
 * If `filterSize` is a single number,
 * then `filterDepth == filterHeight == filterWidth`.
 * @param strides The strides of the pooling:
 * `[strideDepth, strideHeight, strideWidth]`.
 * If `strides` is a single number,
 * then `strideDepth == strideHeight == strideWidth`.
 * @param pad The type of padding algorithm.
 * - `same` and stride 1: output will be of same size as input,
 * regardless of filter size.
 * - `valid`: output will be smaller than input if filter is larger
 * than 1*1x1.
 * - For more info, see this guide:
 * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](
 * https://www.tensorflow.org/api_docs/python/tf/nn/convolution)
 * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is
 * provided, it will default to truncate.
 * @param dataFormat An optional string from: "NDHWC", "NCDHW". Defaults to
 * "NDHWC". Specify the data format of the input and output data. With the
 * default format "NDHWC", the data is stored in the order of: [batch,
 * depth, height, width, channels]. Only "NDHWC" is currently supported.
 *
 * @doc {heading: 'Operations', subheading: 'Convolution'}
 */
declare function avgPool3d_<T extends Tensor4D | Tensor5D>(x: T | TensorLike, filterSize: [number, number, number] | number, strides: [number, number, number] | number, pad: 'valid' | 'same' | number, dimRoundingMode?: 'floor' | 'round' | 'ceil', dataFormat?: 'NDHWC' | 'NCDHW'): T;
/** Attributes accepted by the AvgPool3D kernel; see `avgPool3d_` for semantics. */
export declare interface AvgPool3DAttrs {
filterSize: [number, number, number] | number;
strides: [number, number, number] | number;
pad: 'valid' | 'same' | number;
dimRoundingMode?: 'floor' | 'round' | 'ceil';
dataFormat: 'NDHWC' | 'NCDHW';
}
/** Kernel name constant for the AvgPool3D gradient op. */
export declare const AvgPool3DGrad = "AvgPool3DGrad";
/** Attributes accepted by the AvgPool3DGrad kernel; mirrors `AvgPool3DAttrs`. */
export declare interface AvgPool3DGradAttrs {
filterSize: [number, number, number] | number;
strides: [number, number, number] | number;
pad: 'valid' | 'same' | number;
dimRoundingMode?: 'floor' | 'round' | 'ceil';
}
/** Named inputs for the AvgPool3DGrad kernel: upstream gradient `dy` and forward `input`. */
export declare type AvgPool3DGradInputs = Pick<NamedTensorInfoMap, 'dy' | 'input'>;
/** Named inputs for the AvgPool3D kernel (a single input tensor `x`). */
export declare type AvgPool3DInputs = Pick<NamedTensorInfoMap, 'x'>;
/**
 * Computes the 2D average pooling of an image.
 *
 * @param x The input tensor, of rank 4 or rank 3 of shape
 * `[batch, height, width, inChannels]`. If rank 3, batch of 1 is assumed.
 * @param filterSize The filter size: `[filterHeight, filterWidth]`. If
 * `filterSize` is a single number, then `filterHeight == filterWidth`.
 * @param strides The strides of the pooling: `[strideHeight, strideWidth]`. If
 * `strides` is a single number, then `strideHeight == strideWidth`.
 * @param pad The type of padding algorithm:
 * - `same` and stride 1: output will be of same size as input,
 * regardless of filter size.
 * - `valid`: output will be smaller than input if filter is larger
 * than 1x1.
 * - For more info, see this guide:
 * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](
 * https://www.tensorflow.org/api_docs/python/tf/nn/convolution)
 * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is
 * provided, it will default to truncate.
 *
 * @doc {heading: 'Operations', subheading: 'Convolution'}
 */
declare function avgPool_<T extends Tensor3D | Tensor4D>(x: T | TensorLike, filterSize: [number, number] | number, strides: [number, number] | number, pad: 'valid' | 'same' | number | conv_util.ExplicitPadding, dimRoundingMode?: 'floor' | 'round' | 'ceil'): T;
/** Attributes accepted by the AvgPool kernel; see `avgPool_` for semantics. */
export declare interface AvgPoolAttrs {
filterSize: [number, number] | number;
strides: [number, number] | number;
pad: 'valid' | 'same' | number | ExplicitPadding;
dimRoundingMode?: 'floor' | 'round' | 'ceil';
}
/** Kernel name constant for the AvgPool gradient op. */
export declare const AvgPoolGrad = "AvgPoolGrad";
/** Attributes accepted by the AvgPoolGrad kernel; mirrors `AvgPoolAttrs`. */
export declare interface AvgPoolGradAttrs {
filterSize: [number, number] | number;
strides: [number, number] | number;
pad: 'valid' | 'same' | number | ExplicitPadding;
}
/** Named inputs for the AvgPoolGrad kernel: upstream gradient `dy` and forward `input`. */
export declare type AvgPoolGradInputs = Pick<NamedTensorInfoMap, 'dy' | 'input'>;
/** Named inputs for the AvgPool kernel (a single input tensor `x`). */
export declare type AvgPoolInputs = Pick<NamedTensorInfoMap, 'x'>;
/**
* @license
* Copyright 2017 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
 * Returns true if the axis specifies the inner most dimensions of the
 * array.
 */
declare function axesAreInnerMostDims(axes: number[], rank: number): boolean;
/** Empty marker interface for backend implementations. */
declare interface Backend {
}
/**
 * Gets the current backend. If no backends have been initialized, this will
 * attempt to initialize the best backend. Will throw an error if the highest
 * priority backend has async initialization, in which case you should call
 * 'await tf.ready()' before running other code.
 *
 * @doc {heading: 'Backends'}
 */
export declare function backend(): KernelBackend;
/**
 * Namespace of helper types and functions re-exported for use by backend
 * implementations (shape/axis math, conv/pool info computation, fused-op
 * helpers, sparse-op error messages, complex-number utilities, etc.).
 */
declare namespace backend_util {
export {
fromUint8ToStringArray,
fromStringArrayToUint8,
slice_util,
BackendValues,
TypedArray,
upcastType,
PixelData,
MemoryInfo,
TimingInfo,
segment_util,
axesAreInnerMostDims,
combineLocations,
computeOutAndReduceShapes,
expandShapeToKeepDim,
assertAxesAreInnerMostDims,
getAxesPermutation,
getUndoAxesPermutation,
getInnerMostAxes,
getBroadcastDims,
getReductionAxes,
assertAndGetBroadcastShape,
assertParamsConsistent,
computeOutShape_3 as computeOutShape,
computeDilation2DInfo,
computePool2DInfo,
computePool3DInfo,
computeConv2DInfo,
computeConv3DInfo,
computeDefaultPad,
tupleValuesAreOne,
eitherStridesOrDilationsAreOne,
stridesOrDilationsArePositive,
convertConv2DDataFormat,
checkPadOnDimRoundingMode,
ExplicitPadding,
PadInfo,
PadInfo3D,
Conv2DInfo,
Conv3DInfo,
getFusedDyActivation,
getFusedBiasGradient,
applyActivation,
shouldFuse,
FusedConv2DConfig,
FusedBatchMatMulConfig,
Activation,
combineRaggedTensorToTensorShapes,
getRowPartitionTypesHelper,
getRaggedRank,
validateDefaultValueShape,
RowPartitionType,
computeOptimalWindowSize,
PARALLELIZE_THRESHOLD,
ReduceInfo,
getImageCenter,
getReshaped,
getPermuted,
getReshapedPermuted,
getSliceBeginCoords,
getSliceSize,
prepareAndValidate,
validateUpdateShape,
validateInput,
calculateShapes,
ScatterShapeInfo,
SELU_SCALEALPHA,
SELU_SCALE,
ERF_P,
ERF_A1,
ERF_A2,
ERF_A3,
ERF_A4,
ERF_A5,
warn,
log_2 as log,
mergeRealAndImagArrays,
splitRealAndImagArrays,
complexWithEvenIndex,
complexWithOddIndex,
getComplexWithIndex,
assignToTypedArray,
exponents,
exponent,
decodeEinsumEquation,
getEinsumPermutation,
checkEinsumDimSizes,
getEinsumComputePath,
isIdentityPermutation,
prepareSplitSize,
getSparseFillEmptyRowsIndicesDenseShapeMismatch,
getSparseFillEmptyRowsNegativeIndexErrorMessage,
getSparseFillEmptyRowsOutOfRangeIndexErrorMessage,
getSparseReshapeMultipleNegativeOneOutputDimErrorMessage,
getSparseReshapeNegativeOutputDimErrorMessage,
getSparseReshapeEmptyTensorZeroOutputDimErrorMessage,
getSparseReshapeInputOutputMultipleErrorMessage,
getSparseReshapeInputOutputMismatchErrorMessage,
getSparseSegmentReductionNegativeSegmentIdsErrorMessage,
getSparseSegmentReductionNonIncreasingSegmentIdsErrorMessage,
getSparseSegmentReductionSegmentIdOutOfRangeErrorMessage,
getSparseSegmentReductionIndicesOutOfRangeErrorMessage
}
}
export { backend_util }
/** Timer abstraction a backend may provide for profiling kernel execution. */
declare interface BackendTimer {
/** Whether this backend can time kernels. */
timerAvailable(): boolean;
/** Times the execution of `f` and resolves with the timing info. */
time(f: () => void): Promise<BackendTimingInfo>;
}
/** Timing result produced by a `BackendTimer`. */
export declare interface BackendTimingInfo {
/** Kernel time in milliseconds, or an error description if timing failed. */
kernelMs: number | {
error: string;
};
getExtraProfileInfo?(): string;
}
/** The underlying tensor data that gets stored in a backend. */
export declare type BackendValues = Float32Array | Int32Array | Uint8Array | Uint8Array[];
/** Public op alias; see `basicLSTMCell_` below for the documented contract. */
export declare const basicLSTMCell: typeof basicLSTMCell_;
/**
 * Computes the next state and output of a BasicLSTMCell.
 *
 * Returns `[newC, newH]`.
 *
 * Derived from tf.contrib.rnn.BasicLSTMCell.
 *
 * @param forgetBias Forget bias for the cell.
 * @param lstmKernel The weights for the cell.
 * @param lstmBias The bias for the cell.
 * @param data The input to the cell.
 * @param c Previous cell state.
 * @param h Previous cell output.
 *
 * @doc {heading: 'Operations', subheading: 'RNN'}
 */
declare function basicLSTMCell_(forgetBias: Scalar | TensorLike, lstmKernel: Tensor2D | TensorLike, lstmBias: Tensor1D | TensorLike, data: Tensor2D | TensorLike, c: Tensor2D | TensorLike, h: Tensor2D | TensorLike): [Tensor2D, Tensor2D];
/** Kernel name constant for the BatchMatMul op. */
export declare const BatchMatMul = "BatchMatMul";
/** Attributes accepted by the BatchMatMul kernel. */
export declare interface BatchMatMulAttrs {
/** If true, `a` is transposed before multiplication. */
transposeA: boolean;
/** If true, `b` is transposed before multiplication. */
transposeB: boolean;
}
/** Named inputs for the BatchMatMul kernel (operands `a` and `b`). */
export declare type BatchMatMulInputs = Pick<NamedTensorInfoMap, 'a' | 'b'>;
/** Public op alias; see `batchNorm_` below for the documented contract. */
export declare const batchNorm: typeof batchNorm_;
/** Public op alias; see `batchNorm2d_` below for the documented contract. */
export declare const batchNorm2d: typeof batchNorm2d_;
/**
 * Batch normalization, strictly for 2D. For the more relaxed version, see
 * `tf.batchNorm`.
 *
 * @param x The input Tensor.
 * @param mean A mean Tensor.
 * @param variance A variance Tensor.
 * @param offset An offset Tensor.
 * @param scale A scale Tensor.
 * @param varianceEpsilon A small float number to avoid dividing by 0.
 */
declare function batchNorm2d_(x: Tensor2D | TensorLike, mean: Tensor2D | Tensor1D | TensorLike, variance: Tensor2D | Tensor1D | TensorLike, offset?: Tensor2D | Tensor1D | TensorLike, scale?: Tensor2D | Tensor1D | TensorLike, varianceEpsilon?: number): Tensor2D;
/** Public op alias; see `batchNorm3d_` below for the documented contract. */
export declare const batchNorm3d: typeof batchNorm3d_;
/**
 * Batch normalization, strictly for 3D. For the more relaxed version, see
 * `tf.batchNorm`.
 *
 * @param x The input Tensor.
 * @param mean A mean Tensor.
 * @param variance A variance Tensor.
 * @param offset An offset Tensor.
 * @param scale A scale Tensor.
 * @param varianceEpsilon A small float number to avoid dividing by 0.
 */
declare function batchNorm3d_(x: Tensor3D | TensorLike, mean: Tensor3D | Tensor1D | TensorLike, variance: Tensor3D | Tensor1D | TensorLike, offset?: Tensor3D | Tensor1D | TensorLike, scale?: Tensor3D | Tensor1D | TensorLike, varianceEpsilon?: number): Tensor3D;
/** Public op alias; see `batchNorm4d_` below for the documented contract. */
export declare const batchNorm4d: typeof batchNorm4d_;
/**
 * Batch normalization, strictly for 4D. For the more relaxed version, see
 * `tf.batchNorm`.
 *
 * @param x The input Tensor.
 * @param mean A mean Tensor.
 * @param variance A variance Tensor.
 * @param offset An offset Tensor.
 * @param scale A scale Tensor.
 * @param varianceEpsilon A small float number to avoid dividing by 0.
 */
declare function batchNorm4d_(x: Tensor4D | TensorLike, mean: Tensor4D | Tensor1D | TensorLike, variance: Tensor4D | Tensor1D | TensorLike, offset?: Tensor4D | Tensor1D | TensorLike, scale?: Tensor4D | Tensor1D | TensorLike, varianceEpsilon?: number): Tensor4D;
/**
 * Batch normalization.
 *
 * As described in
 * [http://arxiv.org/abs/1502.03167](http://arxiv.org/abs/1502.03167).
 *
 * Mean, variance, scale, and offset can be of two shapes:
 * - The same shape as the input.
 * - In the common case, the depth dimension is the last dimension of x, so
 * the values would be a `tf.Tensor1D` of shape [depth].
 *
 * Also available are stricter rank-specific methods with the same signature
 * as this method that assert that parameters passed are of given rank
 * - `tf.batchNorm2d`
 * - `tf.batchNorm3d`
 * - `tf.batchNorm4d`
 *
 * @param x The input Tensor.
 * @param mean A mean Tensor.
 * @param variance A variance Tensor.
 * @param offset An offset Tensor.
 * @param scale A scale Tensor.
 * @param varianceEpsilon A small float number to avoid dividing by 0.
 *
 * @doc {heading: 'Operations', subheading: 'Normalization'}
 */
declare function batchNorm_<R extends Rank>(x: Tensor<R> | TensorLike, mean: Tensor<R> | Tensor1D | TensorLike, variance: Tensor<R> | Tensor1D | TensorLike, offset?: Tensor<R> | Tensor1D | TensorLike, scale?: Tensor<R> | Tensor1D | TensorLike, varianceEpsilon?: number): Tensor<R>;
/** Kernel name constant for the BatchToSpaceND op. */
export declare const BatchToSpaceND = "BatchToSpaceND";
/** Public op alias; see `batchToSpaceND_` below for the documented contract. */
export declare const batchToSpaceND: typeof batchToSpaceND_;
/**
 * This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of
 * shape `blockShape + [batch]`, interleaves these blocks back into the grid
 * defined by the spatial dimensions `[1, ..., M]`, to obtain a result with
 * the same rank as the input. The spatial dimensions of this intermediate
 * result are then optionally cropped according to `crops` to produce the
 * output. This is the reverse of `tf.spaceToBatchND`. See below for a precise
 * description.
 *
 * ```js
 * const x = tf.tensor4d([1, 2, 3, 4], [4, 1, 1, 1]);
 * const blockShape = [2, 2];
 * const crops = [[0, 0], [0, 0]];
 *
 * x.batchToSpaceND(blockShape, crops).print();
 * ```
 *
 * @param x A `tf.Tensor`. N-D with `x.shape` = `[batch] + spatialShape +
 * remainingShape`, where spatialShape has `M` dimensions.
 * @param blockShape A 1-D array. Must have shape `[M]`, all values must
 * be >= 1.
 * @param crops A 2-D array. Must have shape `[M, 2]`, all values must be >= 0.
 * `crops[i] = [cropStart, cropEnd]` specifies the amount to crop from input
 * dimension `i + 1`, which corresponds to spatial dimension `i`. It is required
 * that `cropStart[i] + cropEnd[i] <= blockShape[i] * inputShape[i + 1]`
 *
 * This operation is equivalent to the following steps:
 *
 * 1. Reshape `x` to `reshaped` of shape: `[blockShape[0], ...,
 * blockShape[M-1], batch / prod(blockShape), x.shape[1], ...,
 * x.shape[N-1]]`
 *
 * 2. Permute dimensions of `reshaped` to produce `permuted` of shape `[batch /
 * prod(blockShape),x.shape[1], blockShape[0], ..., x.shape[M],
 * blockShape[M-1],x.shape[M+1], ..., x.shape[N-1]]`
 *
 * 3. Reshape `permuted` to produce `reshapedPermuted` of shape `[batch /
 * prod(blockShape),x.shape[1] * blockShape[0], ..., x.shape[M] *
 * blockShape[M-1],x.shape[M+1], ..., x.shape[N-1]]`
 *
 * 4. Crop the start and end of dimensions `[1, ..., M]` of `reshapedPermuted`
 * according to `crops` to produce the output of shape: `[batch /
 * prod(blockShape),x.shape[1] * blockShape[0] - crops[0,0] - crops[0,1],
 * ..., x.shape[M] * blockShape[M-1] - crops[M-1,0] -
 * crops[M-1,1],x.shape[M+1], ..., x.shape[N-1]]`
 *
 * @doc {heading: 'Tensors', subheading: 'Transformations'}
 */
declare function batchToSpaceND_<T extends Tensor>(x: T | TensorLike, blockShape: number[], crops: number[][]): T;
/** Attributes accepted by the BatchToSpaceND kernel; see `batchToSpaceND_` for semantics. */
export declare interface BatchToSpaceNDAttrs {
blockShape: number[];
crops: number[][];
}
/** Named inputs for the BatchToSpaceND kernel (a single input tensor `x`). */
export declare type BatchToSpaceNDInputs = Pick<NamedTensorInfoMap, 'x'>;
/** Named inputs shared by binary kernels: operands `a` and `b`. */
export declare type BinaryInputs = Pick<NamedTensorInfoMap, 'a' | 'b'>;
/** Kernel name constant for the Bincount op. */
export declare const Bincount = "Bincount";
/** Public op alias; see `bincount_` below for the documented contract. */
export declare const bincount: typeof bincount_;
/**
 * Outputs a vector with length `size` and the same dtype as `weights`.
 *
 * If `weights` are empty, then index `i` stores the number of times the value
 * `i` is counted in `x`. If `weights` are non-empty, then index `i` stores the
 * sum of the value in `weights` at each index where the corresponding value in
 * `x` is `i`.
 *
 * Values in `x` outside of the range [0, size) are ignored.
 *
 * @param x The input int tensor, rank 1.
 * @param weights The weights tensor, must have the same shape as x, or a
 * length-0 Tensor, in which case it acts as all weights equal to 1.
 * @param size Non-negative integer.
 *
 * @doc {heading: 'Operations', subheading: 'Reduction'}
 */
declare function bincount_<T extends Tensor1D>(x: T | TensorLike, weights: T | TensorLike, size: number): T;
/** Attributes accepted by the Bincount kernel. */
export declare interface BincountAttrs {
/** Length of the output vector; see `bincount_`. */
size: number;
}
/** Named inputs for the Bincount kernel: values `x` and optional `weights`. */
export declare type BincountInputs = Pick<NamedTensorInfoMap, 'x' | 'weights'>;
/** Kernel name constant for the BitwiseAnd op. */
export declare const BitwiseAnd = "BitwiseAnd";
/** Public op alias; see `bitwiseAnd_` below for the documented contract. */
export declare const bitwiseAnd: typeof bitwiseAnd_;
/**
 * Bitwise `AND` operation for input tensors.
 *
 * Given two input tensors, returns a new tensor
 * with the `AND` calculated values.
 *
 * The method supports int32 values
 *
 *
 * ```js
 * const x = tf.tensor1d([0, 5, 3, 14], 'int32');
 * const y = tf.tensor1d([5, 0, 7, 11], 'int32');
 * tf.bitwiseAnd(x, y).print();
 * ```
 *
 * @param x The input tensor to be calculated.
 * @param y The input tensor to be calculated.
 *
 * @doc {heading: 'Operations', subheading: 'Logical'}
 */
declare function bitwiseAnd_<R extends Rank>(x: Tensor, y: Tensor): Tensor<R>;
/** Named inputs for the BitwiseAnd kernel (binary inputs `a` and `b`). */
export declare type BitwiseAndInputs = BinaryInputs;
export declare const booleanMaskAsync: typeof booleanMaskAsync_;
/**
* Apply boolean mask to tensor.
*
* ```js
* const tensor = tf.tensor2d([1, 2, 3, 4, 5, 6], [3, 2]);
* const mask = tf.tensor1d([1, 0, 1], 'bool');
* const result = await tf.booleanMaskAsync(tensor, mask);
* result.print();
* ```
*
* @param tensor N-D tensor.
* @param mask K-D boolean tensor, K <= N and K must be known statically.
* @param axis A 0-D int Tensor representing the axis in tensor to mask from.
* By default, axis is 0 which will mask from the first dimension.
* Otherwise K + axis <= N.
*
* @doc {heading: 'Tensors', subheading: 'Slicing and Joining'}
*/
declare function booleanMaskAsync_(tensor: Tensor | TensorLike, mask: Tensor | TensorLike, axis?: number): Promise<Tensor>;
// Namespace re-exporting the broadcast shape utility helpers.
declare namespace broadcast_util {
export {
getBroadcastDims,
getReductionAxes,
assertAndGetBroadcastShape
}
}
export { broadcast_util }
// Kernel name constant for the BroadcastArgs op.
export declare const BroadcastArgs = "BroadcastArgs";
export declare const broadcastArgs: typeof broadcastArgs_;
/**
* Return the shape of s0 op s1 with broadcast.
*
* Computes r0, the broadcast shape, as a tensor.
* s0, s1 and r0 are all integer vectors.
*
* This function returns the shape of the result of an operation between
* two tensors of size s0 and s1 performed with broadcast.
*
* @param s0 A tensor representing a shape
* @param s1 A tensor representing a shape
*
* @doc {heading: 'Tensors', subheading: 'Transformations'}
*/
declare function broadcastArgs_<R extends Rank>(s0: Tensor | TensorLike, s1: Tensor | TensorLike): Tensor<R>;
// Named tensor inputs (the two shape vectors) for the BroadcastArgs kernel.
export declare type BroadcastArgsInputs = Pick<NamedTensorInfoMap, 's0' | 's1'>;
// Kernel name constant for the BroadcastTo op.
export declare const BroadcastTo = "BroadcastTo";
export declare const broadcastTo: typeof broadcastTo_;
/**
* Broadcast an array to a compatible shape NumPy-style.
*
* The tensor's shape is compared to the broadcast shape from end to beginning.
* Ones are prepended to the tensor's shape until it has the same length as
* the broadcast shape. If input.shape[i]==shape[i], the (i+1)-th axis is
* already broadcast-compatible. If input.shape[i]==1 and shape[i]==N, then
* the input tensor is tiled N times along that axis (using tf.tile).
*
* @param x The tensor that is to be broadcast.
* @param shape The input is to be broadcast to this shape.
*
* @doc {heading: 'Tensors', subheading: 'Transformations'}
*/
declare function broadcastTo_<R extends Rank>(x: Tensor | TensorLike, shape: ShapeMap[R]): Tensor<R>;
// Serializable attributes for the BroadcastTo kernel.
// NOTE: upstream spells this 'BroadCastToAttrs' (capital C) — kept as-is for API compatibility.
export declare interface BroadCastToAttrs {
shape: number[];
inputShape: number[];
}
// Named tensor inputs for the BroadcastTo kernel.
export declare type BroadcastToInputs = Pick<NamedTensorInfoMap, 'x'>;
// Namespace re-exporting the browser pixel-data helpers (image/canvas <-> tensor).
declare namespace browser {
export {
fromPixelsAsync,
toPixels,
draw,
fromPixels
}
}
export { browser }
/**
* Creates an IOHandler that loads model artifacts from user-selected files.
*
* This method can be used for loading from files such as user-selected files
* in the browser.
* When used in conjunction with `tf.loadLayersModel`, an instance of
* `tf.LayersModel` (Keras-style) can be constructed from the loaded artifacts.
*
* ```js
* // Note: This code snippet won't run properly without the actual file input
* // elements in the HTML DOM.
*
* // Suppose there are two HTML file input (`<input type="file" ...>`)
* // elements.
* const uploadJSONInput = document.getElementById('upload-json');
* const uploadWeightsInput = document.getElementById('upload-weights');
* const model = await tf.loadLayersModel(tf.io.browserFiles(
* [uploadJSONInput.files[0], uploadWeightsInput.files[0]]));
* ```
*
* @param files `File`s to load from. Currently, this function supports only
* loading from files that contain Keras-style models (i.e., `tf.Model`s), for
* which an `Array` of `File`s is expected (in that order):
* - A JSON file containing the model topology and weight manifest.
* - Optionally, one or more binary files containing the binary weights.
* These files must have names that match the paths in the `weightsManifest`
* contained by the aforementioned JSON file, or errors will be thrown
* during loading. These weights files have the same format as the ones
* generated by `tensorflowjs_converter` that comes with the `tensorflowjs`
* Python PIP package. If no weights files are provided, only the model
* topology will be loaded from the JSON file above.
* @returns An instance of `Files` `IOHandler`.
*
* @doc {
* heading: 'Models',
* subheading: 'Loading',
* namespace: 'io',
* ignoreCI: true
* }
*/
declare function browserFiles(files: File[]): IOHandler;
/**
* Deprecated. Use `tf.io.http`.
* @deprecated Prefer `tf.io.http`; this alias is kept for backward compatibility.
* @param path URL path passed through to the HTTP-based IOHandler.
* @param loadOptions Optional load configuration.
*/
declare function browserHTTPRequest(path: string, loadOptions?: LoadOptions): IOHandler;
/**
* Creates an empty `tf.TensorBuffer` with the specified `shape` and `dtype`.
*
* The values are stored in CPU as `TypedArray`. Fill the buffer using
* `buffer.set()`, or by modifying directly `buffer.values`.
*
* When done, call `buffer.toTensor()` to get an immutable `tf.Tensor` with
* those values.
*
* ```js
* // Create a buffer and set values at particular indices.
* const buffer = tf.buffer([2, 2]);
* buffer.set(3, 0, 0);
* buffer.set(5, 1, 0);
*
* // Convert the buffer back to a tensor.
* buffer.toTensor().print();
* ```
*
* @param shape An array of integers defining the output tensor shape.
* @param dtype The dtype of the buffer. Defaults to 'float32'.
* @param values The values of the buffer as `TypedArray`. Defaults to
* zeros.
*
* @doc {heading: 'Tensors', subheading: 'Creation'}
*/
export declare function buffer<R extends Rank, D extends DataType = 'float32'>(shape: ShapeMap[R], dtype?: D, values?: DataTypeMap[D]): TensorBuffer<R, D>;
/**
* Returns the approximate number of bytes allocated in the string array - 2
* bytes per character. Computing the exact bytes for a native string in JS
* is not possible since it depends on the encoding of the html page that
* serves the website.
*/
declare function bytesFromStringArray(arr: Uint8Array[]): number;
// Returns the byte width of a single element of the given dtype.
declare function bytesPerElement(dtype: DataType): number;
/**
* Calculate the shape information for the output.
*
* @param update The tensor contains the update values.
* @param indices The tensor contains the indices for the update values.
* @param shape The shape of the output tensor.
*
* @returns ScatterShapeInfo
*/
declare function calculateShapes(updates: TensorInfo, indices: TensorInfo, shape: number[]): ScatterShapeInfo;
// Kernel name constant for the Cast op.
export declare const Cast = "Cast";
export declare const cast: typeof cast_;
/**
* Casts a `tf.Tensor` to a new dtype.
*
* ```js
* const x = tf.tensor1d([1.5, 2.5, 3]);
* tf.cast(x, 'int32').print();
* ```
* @param x The input tensor to be cast.
* @param dtype The dtype to cast the input tensor to.
*
* @doc {heading: 'Tensors', subheading: 'Transformations'}
*/
declare function cast_<T extends Tensor>(x: T | TensorLike, dtype: DataType): T;
// Serializable attributes for the Cast kernel.
export declare interface CastAttrs {
dtype: DataType;
}
// Cast takes the standard single-operand ('x') input.
export declare type CastInputs = UnaryInputs;
// Kernel name constant for the Ceil op.
export declare const Ceil = "Ceil";
export declare const ceil: typeof ceil_;
/**
* Computes ceiling of input `tf.Tensor` element-wise: `ceil(x)`
*
* ```js
* const x = tf.tensor1d([.6, 1.1, -3.3]);
*
* x.ceil().print(); // or tf.ceil(x)
* ```
* @param x The input Tensor.
*
* @doc {heading: 'Operations', subheading: 'Basic math'}
*/
declare function ceil_<T extends Tensor>(x: T | TensorLike): T;
// Ceil takes the standard single-operand ('x') input.
export declare type CeilInputs = UnaryInputs;
// Validation helper: presumably throws when `vals` cannot be represented in
// `dtype` — NOTE(review): inferred from name/signature; confirm against implementation.
declare function checkConversionForErrors<D extends DataType>(vals: DataTypeMap[D] | number[], dtype: D): void;
/**
* Checks that the dimension sizes from different input tensors match the
* equation.
* @param nDims Total number of distinct dimension ids in the equation.
* @param idDims Dimension ids, one array per input tensor.
* @param tensors The input tensors whose shapes are validated.
*/
declare function checkEinsumDimSizes(nDims: number, idDims: number[][], tensors: Tensor[]): void;
/**
* Check validity of pad when using dimRoundingMode.
* @param opDesc A string of op description
* @param pad The type of padding algorithm.
* - `same` and stride 1: output will be of same size as input,
* regardless of filter size.
* - `valid` output will be smaller than input if filter is larger
* than 1x1.
* - For more info, see this guide:
* [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](
* https://www.tensorflow.org/api_docs/python/tf/nn/convolution)
* @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is
* provided, it will default to truncate.
* @throws unknown padding parameter
*/
declare function checkPadOnDimRoundingMode(opDesc: string, pad: 'valid' | 'same' | number | ExplicitPadding, dimRoundingMode?: 'floor' | 'round' | 'ceil'): void;
/** Clamps a value to a specified range. */
declare function clamp(min: number, x: number, max: number): number;
// Kernel name constant for the ClipByValue op.
export declare const ClipByValue = "ClipByValue";
export declare const clipByValue: typeof clipByValue_;
/**
* Clips values element-wise. `max(min(x, clipValueMax), clipValueMin)`
*
* ```js
* const x = tf.tensor1d([-1, 2, -3, 4]);
*
* x.clipByValue(-2, 3).print(); // or tf.clipByValue(x, -2, 3)
* ```
* @param x The input tensor.
* @param clipValueMin Lower bound of range to be clipped to.
* @param clipValueMax Upper bound of range to be clipped to.
*
* @doc {heading: 'Operations', subheading: 'Basic math'}
*/
declare function clipByValue_<T extends Tensor>(x: T | TensorLike, clipValueMin: number, clipValueMax: number): T;
// Serializable attributes (clip bounds) for the ClipByValue kernel.
export declare interface ClipByValueAttrs {
clipValueMin: number;
clipValueMax: number;
}
// ClipByValue takes the standard single-operand ('x') input.
export declare type ClipByValueInputs = UnaryInputs;
export declare const clone: typeof clone_;
/**
* Creates a new tensor with the same values and shape as the specified
* tensor.
*
* ```js
* const x = tf.tensor([1, 2]);
*
* x.clone().print();
* ```
*
* @param x The tensor to clone.
*
* @doc {heading: 'Tensors', subheading: 'Creation'}
*/
declare function clone_<T extends Tensor>(x: T | TensorLike): T;
// Computes shape bookkeeping for a gather op (result described by GatherOpShapeInfo).
declare function collectGatherOpShapeInfo(x: TensorInfo, indices: TensorInfo, axis: number, batchDims: number): GatherOpShapeInfo;
// Merges output-space and reduced-axes coordinates into one full-rank location.
declare function combineLocations(outputLoc: number[], reduceLoc: number[], axes: number[]): number[];
// Combines ragged rank, outer shape and value shape into a dense output shape.
declare function combineRaggedTensorToTensorShapes(raggedRank: number, shape: number[], valueShape: number[]): number[];
// Kernel name constant for the Complex op.
export declare const Complex = "Complex";
export declare const complex: typeof complex_;
/**
* Converts two real numbers to a complex number.
*
* Given a tensor `real` representing the real part of a complex number, and a
* tensor `imag` representing the imaginary part of a complex number, this
* operation returns complex numbers elementwise of the form [r0, i0, r1, i1],
* where r represents the real part and i represents the imag part.
*
* The input tensors real and imag must have the same shape.
*
* ```js
* const real = tf.tensor1d([2.25, 3.25]);
* const imag = tf.tensor1d([4.75, 5.75]);
* const complex = tf.complex(real, imag);
*
* complex.print();
* ```
*
* @doc {heading: 'Tensors', subheading: 'Creation'}
*/
declare function complex_<T extends Tensor>(real: T | TensorLike, imag: T | TensorLike): T;
// Kernel name constant for the ComplexAbs op.
export declare const ComplexAbs = "ComplexAbs";
// ComplexAbs takes the standard single-operand ('x') input.
export declare type ComplexAbsInputs = UnaryInputs;
// Named tensor inputs (real and imaginary parts) for the Complex kernel.
export declare type ComplexInputs = Pick<NamedTensorInfoMap, 'real' | 'imag'>;
/**
* Extracts even indexed complex values in the given array.
* @param complex The complex tensor values
* @returns The real and imaginary parts extracted at even complex indices.
*/
declare function complexWithEvenIndex(complex: Float32Array): {
real: Float32Array;
imag: Float32Array;
};
/**
* Extracts odd indexed complex values in the given array.
* @param complex The complex tensor values
* @returns The real and imaginary parts extracted at odd complex indices.
*/
declare function complexWithOddIndex(complex: Float32Array): {
real: Float32Array;
imag: Float32Array;
};
/**
* Wraps a list of ArrayBuffers into a `slice()`-able object without allocating
* a large ArrayBuffer.
*
* Allocating large ArrayBuffers (~2GB) can be unstable on Chrome. TFJS loads
* its weights as a list of (usually) 4MB ArrayBuffers and then slices the
* weight tensors out of them. For small models, it's safe to concatenate all
* the weight buffers into a single ArrayBuffer and then slice the weight
* tensors out of it, but for large models, a different approach is needed.
*/
declare class CompositeArrayBuffer {
// The wrapped buffers ("shards") backing this composite buffer.
private shards;
// Cached index of the last shard accessed (presumably a lookup optimization).
private previousShardIndex;
// Set when all shards share one uniform size; undefined otherwise.
private bufferUniformSize?;
/** Total length in bytes across all wrapped buffers. */
readonly byteLength: number;
/**
* Concatenate a number of ArrayBuffers into one.
*
* @param buffers An array of ArrayBuffers to concatenate, or a single
* ArrayBuffer.
* @returns Result of concatenating `buffers` in order.
*/
static join(buffers?: ArrayBuffer[] | ArrayBuffer): ArrayBuffer;
constructor(buffers?: ArrayBuffer | ArrayBuffer[] | TypedArray | TypedArray[]);
/** Returns the bytes in [start, end) as a single ArrayBuffer, spanning shards as needed. */
slice(start?: number, end?: number): ArrayBuffer;
/**
* Get the index of the shard that contains the byte at `byteIndex`.
*/
private findShardForByte;
}
/**
* Computes the information for a forward pass of a convolution/pooling
* operation.
* @returns A Conv2DInfo describing the output geometry of the op.
*/
declare function computeConv2DInfo(inShape: [number, number, number, number], filterShape: [number, number, number, number], strides: number | [number, number], dilations: number | [number, number], pad: 'same' | 'valid' | number | ExplicitPadding, roundingMode?: 'floor' | 'round' | 'ceil', depthwise?: boolean, dataFormat?: 'channelsFirst' | 'channelsLast'): Conv2DInfo;
/**
* Computes the information for a forward pass of a 3D convolution/pooling
* operation.
* @returns A Conv3DInfo describing the output geometry of the op.
*/
declare function computeConv3DInfo(inShape: [number, number, number, number, number], filterShape: [number, number, number, number, number], strides: number | [number, number, number], dilations: number | [number, number, number], pad: 'same' | 'valid' | number, depthwise?: boolean, dataFormat?: 'channelsFirst' | 'channelsLast', roundingMode?: 'floor' | 'round' | 'ceil'): Conv3DInfo;
// Computes the default padding amount for the given input/filter geometry.
declare function computeDefaultPad(inputShape: [number, number] | [number, number, number, number], fieldSize: number, stride: number, dilation?: number): number;
/**
* Computes the convolution geometry for a 2D dilation op.
*
* @param inputShape Input tensor shape is of the following dimensions:
* `[batch, height, width, inChannels]`.
* @param filterShape The filter shape is of the following dimensions:
* `[filterHeight, filterWidth, depth]`.
* @param strides The strides of the sliding window for each dimension of the
* input tensor: `[strideHeight, strideWidth]`.
* If `strides` is a single number,
* then `strideHeight == strideWidth`.
* @param pad The type of padding algorithm.
* - `same` and stride 1: output will be of same size as input,
* regardless of filter size.
* - `valid`: output will be smaller than input if filter is larger
* than 1x1.
* - For more info, see this guide:
* [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](
* https://www.tensorflow.org/api_docs/python/tf/nn/convolution)
* @param dataFormat The data format of the input and output data.
* Defaults to 'NHWC'.
* @param dilations The dilation rates: `[dilationHeight, dilationWidth]`.
* Defaults to `[1, 1]`. If `dilations` is a single number, then
* `dilationHeight == dilationWidth`.
*/
declare function computeDilation2DInfo(inputShape: [number, number, number, number], filterShape: [number, number, number], strides: number | [number, number], pad: 'same' | 'valid' | number, dataFormat: 'NHWC', dilations: number | [number, number]): Conv2DInfo;
// Computes the flat (linear) offset of the coordinates `begin` given per-axis `strides`.
declare function computeFlatOffset(begin: number[], strides: number[]): number;
// Picks a window size for staged reduction given the input size.
declare function computeOptimalWindowSize(inSize: number): number;
// Splits `aShape` into [output shape, reduced shape] for the reduction `axes`.
declare function computeOutAndReduceShapes(aShape: number[], axes: number[]): [number[], number[]];
/*