@tensorflow-models/coco-ssd

Object detection model (coco-ssd) in TensorFlow.js

/**
 * @license
 * Copyright 2018 Google Inc. All Rights Reserved.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * =============================================================================
 */
import { CustomGradientFunc, ScopeFn } from './engine';
import { Scalar, Tensor, Variable } from './tensor';
import { NamedTensorMap, TensorContainer } from './tensor_types';
/**
 * Create a new gradient scope. Similar to scope, but forces all inner scopes
 * to not clean up so that gradient operations can be used inside of this
 * scope.
 * @param nameOrScopeFn The name of the scope, or the function to execute.
 *     If a name is provided, the 2nd argument should be the function.
 *     If a name is provided, and debug mode is on, the timing and the memory
 *     usage of the function will be tracked and displayed on the console
 *     using the provided name.
 * @param scopeFn The function to execute.
 */
declare function gradScope<T extends TensorContainer>(nameOrScopeFn: string | ScopeFn<T>, scopeFn?: ScopeFn<T>): T;
/**
 * Provided `f(x)`, returns another function `g(x, dy?)`, which gives the
 * gradient of `f(x)` with respect to `x`.
 *
 * If `dy` is provided, the gradient of `f(x).mul(dy).sum()` with respect to
 * `x` is computed instead. `f(x)` must take a single tensor `x` and return a
 * single tensor `y`. If `f()` takes multiple inputs, use `tf.grads` instead.
 *
 * ```js
 * // f(x) = x ^ 2
 * const f = x => x.square();
 * // f'(x) = 2x
 * const g = tf.grad(f);
 *
 * const x = tf.tensor1d([2, 3]);
 * g(x).print();
 * ```
 *
 * ```js
 * // f(x) = x ^ 3
 * const f = x => x.pow(tf.scalar(3, 'int32'));
 * // f'(x) = 3x ^ 2
 * const g = tf.grad(f);
 * // f''(x) = 6x
 * const gg = tf.grad(g);
 *
 * const x = tf.tensor1d([2, 3]);
 * gg(x).print();
 * ```
 *
 * @param f The function f(x) to compute the gradient for.
 */
/** @doc {heading: 'Training', subheading: 'Gradients'} */
declare function grad<I extends Tensor, O extends Tensor>(f: (x: I) => O): (x: I, dy?: O) => I;
/**
 * Provided `f(x1, x2,...)`, returns another function `g([x1, x2,...], dy?)`,
 * which gives an array of gradients of `f()` with respect to each input
 * [`x1`,`x2`,...].
 *
 * If `dy` is passed when calling `g()`, the gradient of
 * `f(x1,...).mul(dy).sum()` with respect to each input is computed instead.
 * The provided `f` must take one or more tensors and return a single tensor
 * `y`. If `f()` takes a single input, we recommend using `tf.grad` instead.
 *
 * ```js
 * // f(a, b) = a * b
 * const f = (a, b) => a.mul(b);
 * // df / da = b, df / db = a
 * const g = tf.grads(f);
 *
 * const a = tf.tensor1d([2, 3]);
 * const b = tf.tensor1d([-2, -3]);
 * const [da, db] = g([a, b]);
 * console.log('da');
 * da.print();
 * console.log('db');
 * db.print();
 * ```
 *
 * @param f The function `f(x1, x2,...)` to compute gradients for.
 */
/** @doc {heading: 'Training', subheading: 'Gradients'} */
declare function grads<O extends Tensor>(f: (...args: Tensor[]) => O): (args: Tensor[], dy?: O) => Tensor[];
/**
 * Like `tf.grad`, but also returns the value of `f()`. Useful when `f()`
 * returns a metric you want to show.
 *
 * The result is a rich object with the following properties:
 * - grad: The gradient of `f(x)` w.r.t `x` (result of `tf.grad`).
 * - value: The value returned by `f(x)`.
 *
 * ```js
 * // f(x) = x ^ 2
 * const f = x => x.square();
 * // f'(x) = 2x
 * const g = tf.valueAndGrad(f);
 *
 * const x = tf.tensor1d([2, 3]);
 * const {value, grad} = g(x);
 *
 * console.log('value');
 * value.print();
 * console.log('grad');
 * grad.print();
 * ```
 */
/** @doc {heading: 'Training', subheading: 'Gradients'} */
declare function valueAndGrad<I extends Tensor, O extends Tensor>(f: (x: I) => O): (x: I, dy?: O) => {
    value: O;
    grad: I;
};
/**
 * Like `tf.grads`, but also returns the value of `f()`. Useful when `f()`
 * returns a metric you want to show.
 *
 * The result is a rich object with the following properties:
 * - grads: The gradients of `f()` w.r.t each input (result of `tf.grads`).
 * - value: The value returned by `f()`.
 *
 * ```js
 * // f(a, b) = a * b
 * const f = (a, b) => a.mul(b);
 * // df/da = b, df/db = a
 * const g = tf.valueAndGrads(f);
 *
 * const a = tf.tensor1d([2, 3]);
 * const b = tf.tensor1d([-2, -3]);
 * const {value, grads} = g([a, b]);
 *
 * const [da, db] = grads;
 *
 * console.log('value');
 * value.print();
 *
 * console.log('da');
 * da.print();
 * console.log('db');
 * db.print();
 * ```
 */
/** @doc {heading: 'Training', subheading: 'Gradients'} */
declare function valueAndGrads<O extends Tensor>(f: (...args: Tensor[]) => O): (args: Tensor[], dy?: O) => {
    grads: Tensor[];
    value: O;
};
/**
 * Computes and returns the gradient of f(x) with respect to the list of
 * trainable variables provided by `varList`. If no list is provided, it
 * defaults to all trainable variables.
 *
 * ```js
 * const a = tf.variable(tf.tensor1d([3, 4]));
 * const b = tf.variable(tf.tensor1d([5, 6]));
 * const x = tf.tensor1d([1, 2]);
 *
 * // f(a, b) = a * x ^ 2 + b * x
 * const f = () => a.mul(x.square()).add(b.mul(x)).sum();
 * // df/da = x ^ 2, df/db = x
 * const {value, grads} = tf.variableGrads(f);
 *
 * Object.keys(grads).forEach(varName => grads[varName].print());
 * ```
 *
 * @param f The function to execute. f() should return a scalar.
 * @param varList The list of trainable variables. Defaults to all variables.
 */
/** @doc {heading: 'Training', subheading: 'Gradients'} */
declare function variableGrads(f: () => Scalar, varList?: Variable[]): {
    value: Scalar;
    grads: NamedTensorMap;
};
/**
 * Overrides the gradient computation of a function `f`.
 *
 * Takes a function
 * `f(...inputs) => {value: Tensor, gradFunc: dy => Tensor[]}` and returns
 * another function `g(...inputs)` which takes the same inputs as `f`. When
 * called, `g` returns `f().value`. In backward mode, custom gradients with
 * respect to each input of `f` are computed using `f().gradFunc`.
 *
 * ```js
 * const customOp = tf.customGrad(x => {
 *   // Override gradient of our custom x ^ 2 op to be dy * abs(x);
 *   return {value: x.square(), gradFunc: dy => [dy.mul(x.abs())]};
 * });
 *
 * const x = tf.tensor1d([-1, -2, 3]);
 * const dx = tf.grad(x => customOp(x));
 *
 * console.log(`f(x):`);
 * customOp(x).print();
 * console.log(`f'(x):`);
 * dx(x).print();
 * ```
 *
 * @param f The function to evaluate in forward mode, which should return
 *     `{value: Tensor, gradFunc: (dy) => Tensor[]}`, where `gradFunc` returns
 *     the custom gradients of `f` with respect to its inputs.
 */
/** @doc {heading: 'Training', subheading: 'Gradients'} */
declare function customGrad<T extends Tensor>(f: CustomGradientFunc<T>): (...args: Tensor[]) => T;
export { gradScope, customGrad, variableGrads, valueAndGrad, valueAndGrads, grad, grads, };
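
For orientation, here is a minimal usage sketch of the functions declared above. It assumes the declarations are consumed through the public `tf` namespace of `@tensorflow/tfjs-core` (the import path is an assumption; this file only declares the types), and it reuses the examples from the doc comments, with expected outputs noted inline.

```ts
// A minimal sketch, assuming the customGrad/variableGrads signatures
// declared in this file (newer tfjs-core releases have since revised some
// of these signatures).
import * as tf from '@tensorflow/tfjs-core';

// tf.grad: f(x) = x^2, so g = f' and g(x) = 2x.
const f = (x: tf.Tensor) => x.square();
const g = tf.grad(f);
g(tf.tensor1d([2, 3])).print(); // [4, 6]

// tf.valueAndGrad: same f, but f(x) and f'(x) come back together.
const {value, grad} = tf.valueAndGrad(f)(tf.tensor1d([2, 3]));
value.print(); // [4, 9]
grad.print();  // [4, 6]

// tf.variableGrads: gradients w.r.t. trainable variables, keyed by
// variable name in a NamedTensorMap. f must return a Scalar.
const a = tf.variable(tf.scalar(3));
const x = tf.tensor1d([1, 2]);
const loss = () => a.mul(x.square()).sum() as tf.Scalar; // loss = a * sum(x^2)
const {value: lossValue, grads} = tf.variableGrads(loss);
lossValue.print(); // 15, i.e. 3 * (1 + 4)
Object.keys(grads).forEach(name => grads[name].print()); // d(loss)/da = 5

// tf.customGrad: override the backward pass of x^2 to be dy * |x|
// (mirrors the example in the doc comment above).
const customSquare = tf.customGrad((t: tf.Tensor) => ({
  value: t.square(),
  gradFunc: (dy: tf.Tensor) => [dy.mul(t.abs())],
}));
tf.grad(y => customSquare(y))(tf.tensor1d([-1, -2, 3])).print(); // [1, 2, 3]
```

Of the exports, `grad`/`valueAndGrad` and their multi-input counterparts cover ordinary differentiation, `variableGrads` is the one aimed at training loops over `tf.variable`s, and `gradScope` is a memory-management helper rather than a differentiation entry point.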