@tensorflow-models/coco-ssd

Object detection model (coco-ssd) in TensorFlow.js

"use strict"; /** * @license * Copyright 2018 Google Inc. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ var __extends = (this && this.__extends) || (function () { var extendStatics = Object.setPrototypeOf || ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) || function (d, b) { for (var p in b) if (b.hasOwnProperty(p)) d[p] = b[p]; }; return function (d, b) { extendStatics(d, b); function __() { this.constructor = d; } d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __()); }; })(); Object.defineProperty(exports, "__esModule", { value: true }); var globals_1 = require("../globals"); var serialization_1 = require("../serialization"); /** @doc {heading: 'Training', subheading: 'Classes', namespace: 'train'} */ var Optimizer = /** @class */ (function (_super) { __extends(Optimizer, _super); function Optimizer() { return _super !== null && _super.apply(this, arguments) || this; } /** * Executes `f()` and minimizes the scalar output of `f()` by computing * gradients of y with respect to the list of trainable variables provided by * `varList`. If no list is provided, it defaults to all trainable variables. * * @param f The function to execute and whose output to minimize. * @param returnCost Whether to return the scalar cost value produced by * executing `f()`. * @param varList An optional list of variables to update. If specified, only * the trainable variables in varList will be updated by minimize. Defaults to * all trainable variables. */ /** @doc {heading: 'Training', subheading: 'Optimizers'} */ Optimizer.prototype.minimize = function (f, returnCost, varList) { if (returnCost === void 0) { returnCost = false; } var _a = this.computeGradients(f, varList), value = _a.value, grads = _a.grads; this.applyGradients(grads); // Dispose gradients. var varNames = Object.keys(grads); varNames.forEach(function (varName) { return grads[varName].dispose(); }); if (returnCost) { return value; } else { value.dispose(); return null; } }; /** * Executes f() and computes the gradient of the scalar output of f() with * respect to the list of trainable variables provided by `varList`. If no * list is provided, it defaults to all trainable variables. * * @param f The function to execute and whose output to use for computing * gradients with respect to variables. * @param varList An optional list of variables to compute gradients with * respect to. If specified, only the trainable variables in varList will have * gradients computed with respect to. Defaults to all trainable variables. */ Optimizer.prototype.computeGradients = function (f, varList) { return globals_1.variableGrads(f, varList); }; /** * Dispose the variables (if any) owned by this optimizer instance. 
*/ Optimizer.prototype.dispose = function () { }; return Optimizer; }(serialization_1.Serializable)); exports.Optimizer = Optimizer; Object.defineProperty(Optimizer, Symbol.hasInstance, { value: function (instance) { return instance.minimize != null && instance.computeGradients != null && instance.applyGradients != null; } }); //# sourceMappingURL=optimizer.js.map
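As a usage sketch (not part of this file), the `minimize` method above is normally reached through a concrete subclass such as the SGD optimizer exposed as `tf.train.sgd` in `@tensorflow/tfjs`. The variable names `a`, `xs`, `ys`, the learning rate, and the fitted relation below are illustrative assumptions, not taken from this source.

// Usage sketch: fit a single trainable weight `a` so that a * x approximates y.
var tf = require('@tensorflow/tfjs');

var a = tf.variable(tf.scalar(Math.random()));   // trainable variable
var xs = tf.tensor1d([1, 2, 3, 4]);
var ys = tf.tensor1d([2, 4, 6, 8]);              // target relation: y = 2 * x

var optimizer = tf.train.sgd(0.01);              // SGDOptimizer extends Optimizer

for (var i = 0; i < 100; i++) {
  // minimize() runs f(), computes gradients of the returned scalar with
  // respect to the trainable variables, applies them, and (because the
  // second argument is true) returns the cost, which the caller must dispose.
  var loss = optimizer.minimize(function () {
    return xs.mul(a).sub(ys).square().mean();
  }, true);
  loss.dispose();
}
a.print();                                       // should approach 2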
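For completeness, here is a sketch of the lower-level path that `minimize` wraps, reusing the hypothetical `optimizer`, `a`, `xs`, and `ys` from the previous sketch: `computeGradients` returns the cost and a name-to-gradient map, the concrete optimizer applies the gradients, and the caller disposes the intermediate tensors, mirroring the implementation shown in this file.

// Sketch of one manual update step that minimize() automates.
var result = optimizer.computeGradients(function () {
  return xs.mul(a).sub(ys).square().mean();
});
optimizer.applyGradients(result.grads);              // apply one gradient step
Object.keys(result.grads).forEach(function (name) {  // free gradient tensors
  result.grads[name].dispose();
});
console.log('loss:', result.value.dataSync()[0]);
result.value.dispose();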