@hoff97/tensor-js
PyTorch-like deep learning inference library
import { Activation } from '../../library';
import Tensor, { DType, PadMode } from '../../types';
import { TensorF32 as WTF32, TensorF64 as WTF64, TensorI32 as WTI32, TensorI16 as WTI16, TensorI8 as WTI8, TensorU32 as WTU32, TensorU16 as WTU16, TensorU8 as WTU8 } from '../../wasm/rust_wasm_tensor';
import { CPUTensor } from '../cpu/tensor';
export declare let tensorConstructor: {
    [name: string]: any;
};
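/**
 * Resolves once the Rust/WebAssembly tensor module has been instantiated.
 * Presumably this should be awaited before constructing any WASMTensor
 * (the exact loading behaviour is not visible from this declaration file).
 */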
export declare const wasmLoaded: Promise<void>;
export declare type WT = {
    float64: WTF64;
    float32: WTF32;
    int32: WTI32;
    int16: WTI16;
    int8: WTI8;
    uint32: WTU32;
    uint16: WTU16;
    uint8: WTU8;
};
export declare type DTypeWasm = 'float64' | 'float32' | 'int32' | 'int16' | 'int8' | 'uint32' | 'uint16' | 'uint8';
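/**
 * Tensor implementation backed by the Rust/WebAssembly module. The supported
 * dtypes are exactly the keys of WT above; each one maps to a dedicated
 * Rust-side tensor class (WTF32, WTI32, ...). The dtype defaults to 'float32'.
 */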
export declare class WASMTensor<DTpe extends DTypeWasm = 'float32'> extends Tensor<DTpe> {
    static range(start: number, limit: number, delta: number): WASMTensor<"float32">;
    wasmTensor: WT[DTpe];
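    /**
     * Construct from a flat value array (in which case `shape` should describe
     * its dimensions) or wrap an existing Rust-side tensor handle. The runtime
     * default for `dtype` is presumably 'float32', matching the type default.
     */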
    constructor(values: number[] | WT[DTpe], shape?: Uint32Array, dtype?: DTpe);
    cast<DTpe2 extends DType>(dtype: DTpe2): Tensor<DTpe2>;
    getValues(): any;
    getShape(): readonly number[];
    constantLike(value: number): Tensor<DTpe>;
    singleConstant(value: number): Tensor<DTpe>;
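    /**
     * Frees the memory held by the underlying WASM tensor. Because WebAssembly
     * memory is not reclaimed by the JavaScript garbage collector, callers
     * presumably need to invoke this explicitly once a tensor is no longer used.
     */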
    delete(): void;
    copy(): Tensor<DTpe>;
    exp(): Tensor<DTpe>;
    log(): Tensor<DTpe>;
    sqrt(): Tensor<DTpe>;
    abs(): Tensor<DTpe>;
    sin(): Tensor<DTpe>;
    cos(): Tensor<DTpe>;
    tan(): Tensor<DTpe>;
    asin(): Tensor<DTpe>;
    acos(): Tensor<DTpe>;
    atan(): Tensor<DTpe>;
    sinh(): Tensor<DTpe>;
    cosh(): Tensor<DTpe>;
    tanh(): Tensor<DTpe>;
    asinh(): Tensor<DTpe>;
    acosh(): Tensor<DTpe>;
    atanh(): Tensor<DTpe>;
    sigmoid(): Tensor<DTpe>;
    hardSigmoid(alpha: number, beta: number): Tensor<DTpe>;
    negate(): Tensor<DTpe>;
    powerScalar(power: number, factor: number): Tensor<DTpe>;
    addMultiplyScalar(factor: number, add: number): Tensor<DTpe>;
    sign(): Tensor<DTpe>;
    setValues(values: Tensor<DTpe>, starts: number[]): Tensor<DTpe>;
    add_impl(th: Tensor<DTpe>, tensor: Tensor<DTpe>, _resultShape: readonly number[], alpha: number, beta: number): Tensor<DTpe>;
    subtract_impl(th: Tensor<DTpe>, tensor: Tensor<DTpe>, resultShape: readonly number[], alpha: number, beta: number): Tensor<DTpe>;
    multiply_impl(th: Tensor<DTpe>, tensor: Tensor<DTpe>, resultShape: readonly number[], alpha: number): Tensor<DTpe>;
    divide_impl(th: Tensor<DTpe>, tensor: Tensor<DTpe>, resultShape: readonly number[], alpha: number): Tensor<DTpe>;
    power_impl(th: Tensor<DTpe>, tensor: Tensor<DTpe>, resultShape: readonly number[]): Tensor<DTpe>;
    matMul(tensor: Tensor<DTpe>): Tensor<DTpe>;
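    /**
     * General matrix multiplication. Judging by the parameter names this follows
     * the usual GEMM convention, roughly alpha * op(A) * op(B) + beta * C with
     * optional transposition of either operand (an assumption; the semantics are
     * not documented in this declaration file).
     */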
    gemm_impl(b: Tensor<DTpe>, aTranspose: boolean, bTranspose: boolean, alpha: number, beta: number, c?: Tensor<DTpe>): Tensor<DTpe>;
    sum_impl(axes: number[], keepDims: boolean): Tensor<DTpe>;
    sumSquare_impl(axes: number[], keepDims: boolean): Tensor<DTpe>;
    product_impl(axes: number[], keepDims: boolean): Tensor<DTpe>;
    max_impl(axes: number[], keepDims: boolean): Tensor<DTpe>;
    min_impl(axes: number[], keepDims: boolean): Tensor<DTpe>;
    reduceMean_impl(axes: number[], keepDims: boolean): Tensor<DTpe>;
    reduceMeanSquare_impl(axes: number[], keepDims: boolean): Tensor<DTpe>;
    protected reduceLogSum_impl(axes: number[], keepDims: boolean): Tensor<DTpe>;
    protected reduceLogSumExp_impl(axes: number[], keepDims: boolean): Tensor<DTpe>;
    getActivationFlag(activation: Activation): 0 | 1 | 2;
    conv_impl(kernel: Tensor<DTpe>, dilations: number[], group: number, pads: number[], strides: number[], activation: Activation, bias?: Tensor<DTpe>): Tensor<DTpe>;
    protected convTranspose_impl(kernel: Tensor<DTpe>, dilations: number[], group: number, pads: number[], strides: number[]): Tensor<DTpe>;
    averagePool_impl(kernelShape: number[], pads: number[], strides: number[], includePad: boolean): Tensor<DTpe>;
    reshape_impl(shape: number[]): Tensor<DTpe>;
    concat(tensor: Tensor<DTpe>, axis: number): Tensor<DTpe>;
    transpose_impl(permutation: number[]): Tensor<DTpe>;
    clip(min?: number, max?: number): Tensor<DTpe>;
    clipBackward(grad: Tensor<DTpe>, min?: number, max?: number): Tensor<DTpe>;
    repeat(repeats: number[]): Tensor<DTpe>;
    expand(shape: readonly number[]): Tensor<DTpe>;
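    /**
     * Maps the PadMode string values to the integer codes expected by the
     * WASM side (the exact numeric values are not visible in this declaration).
     */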
    static padModeToInt: {
        constant: number;
        reflect: number;
        edge: number;
    };
    pad_impl(pads: number[], mode: PadMode, value: number): Tensor<DTpe>;
    gather(axis: number, indices: CPUTensor<'uint32'>): Tensor<DTpe>;
    floor(): Tensor<DTpe>;
    ceil(): Tensor<DTpe>;
    round(): Tensor<DTpe>;
    slice_impl(starts: number[], ends: number[], axes: number[], steps: number[]): Tensor<DTpe>;
    upsample(scales: number[]): Tensor<DTpe>;
    normalize(mean: Tensor<DTpe>, variance: Tensor<DTpe>, epsilon: number, scale: Tensor<DTpe>, bias: Tensor<DTpe>): Tensor<DTpe>;
}
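// ---------------------------------------------------------------------------
// Usage sketch (not part of the declaration file above): a minimal example of
// how this backend is presumably driven. Assumptions: WASMTensor and
// wasmLoaded are re-exported from the package entry point (the import path is
// a guess), getValues()/getShape()/delete() are also declared on the Tensor
// base class, and getValues() returns the tensor's flat data.
//
//   import { WASMTensor, wasmLoaded } from '@hoff97/tensor-js';
//
//   await wasmLoaded;                                 // wait for the Rust/WASM module
//
//   const a = new WASMTensor([1, 2, 3, 4], new Uint32Array([2, 2]));
//   const b = new WASMTensor([5, 6, 7, 8], new Uint32Array([2, 2]));
//
//   const c = a.matMul(b);                            // 2x2 matrix product
//   console.log(c.getShape(), c.getValues());
//
//   a.delete(); b.delete(); c.delete();               // WASM memory is freed manually
// ---------------------------------------------------------------------------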