@hoff97/tensor-js
Version:
PyTorch-like deep learning inference library
116 lines (115 loc) • 5.84 kB
TypeScript
import { Tensor } from '../library';
import { CPUTensor } from '../tensor/cpu/tensor';
import { TensorValues, Activation, PadMode, DType } from '../types';
import { BackwardOp, VariableI } from './types';
import { Backend } from '../util/convert';
import REGL from 'regl';
/**
 * Options accepted by the {@link Variable} constructor and factory methods.
 */
export interface VariableOptions<DTpe extends DType> {
/**
 * The gradient can optionally be specified
 */
grad?: Tensor<DTpe>;
/**
 * Backward edge of this variable
 *
 * You most likely do not want to use this
 */
backEdge?: BackwardOp<DTpe>;
/**
 * When set to true, gradients will not be tracked for this
 * variable. Useful for data that is passed into a model.
 */
noGrad?: boolean;
}
/**
 * Tensor that also has a gradient associated to it.
 * When noGrad is false, a dynamic computation graph on
 * this variable will be built.
 *
 * Once backward on a scalar variable (e.g. a variable with shape [1])
 * is called, the gradients for all variables will be computed
 */
export declare class Variable<DTpe extends DType = 'float32'> extends Tensor<DTpe> implements VariableI<DTpe> {
/** The underlying tensor holding this variable's current value */
value: Tensor<DTpe>;
/** Gradient associated with this variable; optional until set or computed */
grad?: Tensor<DTpe>;
/** Backward edge of this variable in the dynamic computation graph */
backEdge?: BackwardOp<DTpe>;
/** When true, gradients are not tracked for this variable */
noGrad: boolean;
/**
 * Creates a variable whose value is the specified value
 */
constructor(value: Tensor<DTpe>, options?: VariableOptions<DTpe>);
/**
 * Creates a variable of the given shape from flat values on the
 * specified backend, optionally with an explicit dtype.
 */
static create<DTpe extends DType>(shape: ReadonlyArray<number>, values: number[], backend: Backend, options?: VariableOptions<DTpe>, dtype?: DTpe): Variable<DTpe>;
/**
 * Creates a GPU variable from texture data (eg. Image/Video element)
 */
static fromData(data: REGL.TextureImageData, options?: VariableOptions<'float32'>): Variable<'float32'>;
/** Casts this variable to the given dtype */
cast<DTpe2 extends DType>(dtype: DTpe2): Tensor<DTpe2>;
/**
 * Performs a backward pass and returns whether the grad is needed or can be deleted
 */
backward(grad?: Tensor<DTpe>): boolean;
/**
 * Whether this variable is a leaf of the computation graph.
 * NOTE(review): presumably true when it has no backward edge — confirm
 * against the implementation.
 */
isLeaf(): boolean;
constantLike(value: number): Tensor<DTpe>;
singleConstant(value: number): Tensor<DTpe>;
/** Resolves the variable's values; shape of the result depends on the dtype */
getValues(): Promise<TensorValues[DTpe]>;
getShape(): readonly number[];
/** Releases resources held by this variable */
delete(): void;
protected reshape_impl(shape: readonly number[], copy: boolean): Tensor<DTpe>;
// Elementwise unary operations. These mirror the Tensor API; as Variable
// overrides, they presumably also record backward edges for autograd —
// confirm in the implementation.
exp(): Tensor<DTpe>;
log(): Tensor<DTpe>;
sqrt(): Tensor<DTpe>;
abs(): Tensor<DTpe>;
sin(): Tensor<DTpe>;
cos(): Tensor<DTpe>;
tan(): Tensor<DTpe>;
asin(): Tensor<DTpe>;
acos(): Tensor<DTpe>;
atan(): Tensor<DTpe>;
sinh(): Tensor<DTpe>;
cosh(): Tensor<DTpe>;
tanh(): Tensor<DTpe>;
asinh(): Tensor<DTpe>;
acosh(): Tensor<DTpe>;
atanh(): Tensor<DTpe>;
sigmoid(): Tensor<DTpe>;
hardSigmoid(alpha: number, beta: number): Tensor<DTpe>;
sign(): Tensor<DTpe>;
negate(): Tensor<DTpe>;
addMultiplyScalar(factor: number, add: number): Tensor<DTpe>;
powerScalar(power: number, factor: number): Tensor<DTpe>;
setValues(values: Tensor<DTpe>, starts: number[]): Tensor<DTpe>;
// Binary / shape operations
matMul(tensor: Tensor<DTpe>): Tensor<DTpe>;
concat(tensor: Tensor<DTpe>, axis: number): Tensor<DTpe>;
clip(min?: number, max?: number): Tensor<DTpe>;
clipBackward(grad: Tensor<DTpe>, min?: number, max?: number): Tensor<DTpe>;
repeat(repeats: number[]): Tensor<DTpe>;
expand(shape: readonly number[]): Tensor<DTpe>;
copy(): Tensor<DTpe>;
gather(axis: number, indices: CPUTensor<'uint32'>): Tensor<DTpe>;
floor(): Tensor<DTpe>;
ceil(): Tensor<DTpe>;
round(): Tensor<DTpe>;
upsample(scales: number[]): Tensor<DTpe>;
normalize(mean: Tensor<DTpe>, variance: Tensor<DTpe>, epsilon: number, scale: Tensor<DTpe>, bias: Tensor<DTpe>): Tensor<DTpe>;
// Broadcasting binary implementations (internal dispatch targets)
add_impl(th: Tensor<DTpe>, tensor: Tensor<DTpe>, resultShape: readonly number[], alpha: number, beta: number): Tensor<DTpe>;
subtract_impl(th: Tensor<DTpe>, tensor: Tensor<DTpe>, resultShape: readonly number[], alpha: number, beta: number): Tensor<DTpe>;
multiply_impl(th: Tensor<DTpe>, tensor: Tensor<DTpe>, resultShape: readonly number[], alpha: number): Tensor<DTpe>;
divide_impl(th: Tensor<DTpe>, tensor: Tensor<DTpe>, resultShape: readonly number[], alpha: number): Tensor<DTpe>;
power_impl(th: Tensor<DTpe>, tensor: Tensor<DTpe>, resultShape: readonly number[]): Tensor<DTpe>;
gemm_impl(b: Tensor<DTpe>, aTranspose: boolean, bTranspose: boolean, alpha: number, beta: number, C?: Tensor<DTpe>): Tensor<DTpe>;
// Reduction implementations (internal dispatch targets)
protected sum_impl(axes: number[], keepDims: boolean): Tensor<DTpe>;
protected sumSquare_impl(axes: number[], keepDims: boolean): Tensor<DTpe>;
protected product_impl(axes: number[], keepDims: boolean): Tensor<DTpe>;
protected max_impl(axes: number[], keepDims: boolean): Tensor<DTpe>;
protected min_impl(axes: number[], keepDims: boolean): Tensor<DTpe>;
protected reduceMean_impl(axes: number[], keepDims: boolean): Tensor<DTpe>;
protected reduceMeanSquare_impl(axes: number[], keepDims: boolean): Tensor<DTpe>;
protected reduceLogSum_impl(axes: number[], keepDims: boolean): Tensor<DTpe>;
protected reduceLogSumExp_impl(axes: number[], keepDims: boolean): Tensor<DTpe>;
// Convolution / pooling / layout implementations (internal dispatch targets)
protected conv_impl(kernel: Tensor<DTpe>, dilations: number[], group: number, pads: number[], strides: number[], activation: Activation, bias?: Tensor<DTpe>): Tensor<DTpe>;
protected convTranspose_impl(kernel: Tensor<DTpe>, dilations: number[], group: number, pads: number[], strides: number[]): Tensor<DTpe>;
protected pad_impl(pads: number[], mode: PadMode, value: number): Tensor<DTpe>;
protected averagePool_impl(kernelShape: number[], pads: number[], strides: number[], includePad: boolean): Tensor<DTpe>;
protected transpose_impl(permutation: number[]): Tensor<DTpe>;
protected slice_impl(starts: number[], ends: number[], axes: number[], steps: number[]): Tensor<DTpe>;
}