@tensorflow/tfjs-core

Hardware-accelerated JavaScript library for machine intelligence

/**
 * @license
 * Copyright 2019 Google LLC. All Rights Reserved.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * =============================================================================
 */
/// <amd-module name="@tensorflow/tfjs-core/dist/ops/fused/conv2d" />
import { Tensor, Tensor3D, Tensor4D } from '../../tensor';
import { TensorLike } from '../../types';
import * as conv_util from '../conv_util';
import { Activation } from '../fused_types';
/**
 * Computes a 2D convolution over the input x, optionally fused with adding a
 * bias and applying an activation.
 *
 * ```js
 * const inputDepth = 2;
 * const inShape = [2, 2, 2, inputDepth];
 * const outputDepth = 2;
 * const fSize = 1;
 * const pad = 0;
 * const strides = 1;
 *
 * const x = tf.tensor4d([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
 * 16], inShape);
 * const w = tf.tensor4d([-1, 1, -2, 0.5], [fSize, fSize, inputDepth,
 * outputDepth]);
 *
 * tf.fused.conv2d({ x, filter: w, strides, pad, dataFormat: 'NHWC',
 * dilations: [1, 1], bias: tf.scalar(5), activation: 'relu' }).print();
 * ```
 *
 * @param obj An object with the following properties:
 * @param x The input tensor, of rank 4 or rank 3, of shape
 *     `[batch, height, width, inChannels]`. If rank 3, batch of 1 is
 *     assumed.
 * @param filter The filter, rank 4, of shape
 *     `[filterHeight, filterWidth, inDepth, outDepth]`.
 * @param strides The strides of the convolution: `[strideHeight,
 *     strideWidth]`.
 * @param pad The type of padding algorithm.
 *   - `same` and stride 1: output will be of same size as input,
 *     regardless of filter size.
 *   - `valid`: output will be smaller than input if filter is larger
 *     than 1x1.
 *   - For more info, see this guide:
 *     [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](
 *     https://www.tensorflow.org/api_docs/python/tf/nn/convolution)
 * @param dataFormat An optional string from: "NHWC", "NCHW". Defaults to
 *     "NHWC". Specify the data format of the input and output data. With the
 *     default format "NHWC", the data is stored in the order of: [batch,
 *     height, width, channels]. Only "NHWC" is currently supported.
 * @param dilations The dilation rates: `[dilationHeight, dilationWidth]`
 *     in which we sample input values across the height and width dimensions
 *     in atrous convolution. Defaults to `[1, 1]`. If `dilations` is a single
 *     number, then `dilationHeight == dilationWidth`. If it is greater than
 *     1, then all values of `strides` must be 1.
 * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is
 *     provided, it will default to truncate.
 * @param bias Tensor to be added to the result.
 * @param activation Name of activation kernel (defaults to `linear`) to be
 *     applied after biasAdd.
 * @param preluActivationWeights Tensor of prelu weights to be applied as part
 *     of a `prelu` activation, typically the same shape as `x`.
 * @param leakyreluAlpha Optional. Alpha to be applied as part of a `leakyrelu`
 *     activation.
 */
declare function fusedConv2d_<T extends Tensor3D | Tensor4D>({ x, filter, strides, pad, dataFormat, dilations, dimRoundingMode, bias, activation, preluActivationWeights, leakyreluAlpha }: {
    x: T | TensorLike;
    filter: Tensor4D | TensorLike;
    strides: [number, number] | number;
    pad: 'valid' | 'same' | number | conv_util.ExplicitPadding;
    dataFormat?: 'NHWC' | 'NCHW';
    dilations?: [number, number] | number;
    dimRoundingMode?: 'floor' | 'round' | 'ceil';
    bias?: Tensor | TensorLike;
    activation?: Activation;
    preluActivationWeights?: Tensor;
    leakyreluAlpha?: number;
}): T;
export declare const conv2d: typeof fusedConv2d_;
export {};
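The exported `conv2d` corresponds to the function invoked as `tf.fused.conv2d` in the JSDoc example above. Below is a minimal usage sketch, assuming the `@tensorflow/tfjs` bundle is available as `tf`; the input values, shapes, and the choice of `'same'` padding are illustrative only:

```js
// Batch of 1, 3x3 spatial size, 2 input channels (NHWC layout).
const x = tf.ones([1, 3, 3, 2]);
// 1x1 filter mapping 2 input channels to 2 output channels.
const filter = tf.tensor4d([-1, 1, -2, 0.5], [1, 1, 2, 2]);

// Convolution, bias add, and ReLU activation run as one fused operation
// instead of three separate kernel launches.
const y = tf.fused.conv2d({
  x,
  filter,
  strides: 1,
  pad: 'same',
  bias: tf.scalar(5),
  activation: 'relu'
});
y.print();  // Tensor of shape [1, 3, 3, 2].
```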