@hoff97/tensor-js
PyTorch-like deep learning inference library
import { DTypeGpu } from '../../../tensor/gpu/interface';
import { GPUTensor } from '../../../tensor/gpu/tensor';
import Tensor, { DType } from '../../../types';
import { Module } from '../../module';
import { Optimizer } from '../optimizer';
/**
* Implements the Adam optimizer
*
 * This is currently quite slow on the CPU and WASM backends. On the GPU
 * backend, an update step is only slightly slower than an SGD update step,
 * and it converges a lot quicker.
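 *
 * Each parameter update follows the standard Adam rule (Kingma & Ba, 2015):
 *
 *   m_t    = beta1 * m_{t-1} + (1 - beta1) * g_t
 *   v_t    = beta2 * v_{t-1} + (1 - beta2) * g_t^2
 *   mHat_t = m_t / (1 - beta1^t)
 *   vHat_t = v_t / (1 - beta2^t)
 *   value  = value - lr * mHat_t / (sqrt(vHat_t) + epsilon)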
*/
export declare class Adam extends Optimizer {
  lr: number;
  beta1: number;
  beta2: number;
  epsilon: number;
  /** First-moment estimates, one per model parameter */
  moment1?: (Tensor<any> | undefined)[];
  /** Second-moment estimates, one per model parameter */
  moment2?: (Tensor<any> | undefined)[];
  /** GPU backend only: per-parameter tensors holding the moment state (see gpuParamStep) */
  moments?: GPUTensor<any>[];
  /** Number of update steps taken so far, used for bias correction */
  t: number;
  constructor(model: Module, lr?: number, beta1?: number, beta2?: number, epsilon?: number);
  step(): void;
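  /**
   * Computes updated exponential moving averages of the gradient (first
   * moment) and of the squared gradient (second moment) for one parameter.
   */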
  updateMoments<DTpe extends DType>(grad: Tensor<DTpe>, moment1: Tensor<DTpe> | undefined, moment2: Tensor<DTpe> | undefined): {
    moment1New: Tensor<DTpe>;
    moment2New: Tensor<DTpe>;
  };
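  /**
   * Applies bias correction to both moment estimates, based on the current
   * step count t.
   */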
  getCorrectedMoments<DTpe extends DType>(moment1: Tensor<DTpe>, moment2: Tensor<DTpe>): {
    correctMoment1: Tensor<DTpe>;
    correctMoment2: Tensor<DTpe>;
  };
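  /**
   * Performs one Adam update for a single parameter, returning the new
   * value together with the updated moment estimates.
   */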
  paramStep<DTpe extends DType>(value: Tensor<DTpe>, grad: Tensor<DTpe>, moment1: Tensor<DTpe> | undefined, moment2: Tensor<DTpe> | undefined): {
    newValue: Tensor<DTpe>;
    moment1: Tensor<DTpe> | undefined;
    moment2: Tensor<DTpe> | undefined;
  };
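  /**
   * GPU variant of the per-parameter update. Judging by the signature, both
   * moment estimates are carried in a single GPUTensor here.
   */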
  gpuParamStep<DTpe extends DTypeGpu>(value: GPUTensor<DTpe>, grad: GPUTensor<DTpe>, moments: GPUTensor<DTpe>): {
    newValue: import("../../../tensor/gpu/interface").GPUTensorI;
    moments: import("../../../tensor/gpu/interface").GPUTensorI;
  };
}
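
A minimal usage sketch, assuming the package root exports Adam and that a forward and backward pass elsewhere populate the parameter gradients; the model instance, step count, and import path below are placeholders, while the constructor signature and step() come from the declaration above.

// Import path assumed; Adam may live under a deeper module path.
import { Adam } from '@hoff97/tensor-js';

declare const model: any; // hypothetical Module with trainable parameters

// beta1, beta2 and epsilon fall back to their defaults when omitted.
const optimizer = new Adam(model, 0.001);

const numSteps = 100; // placeholder step count
for (let i = 0; i < numSteps; i++) {
  // ...forward pass and backward pass that populates the gradients...
  optimizer.step(); // one Adam update over all model parameters
}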