@hoff97/tensor-js
PyTorch-like deep learning inference library
import { DTypeGpu, GPUTensorConstructor, GPUTensorI } from '../../../tensor/gpu/interface';
import { GPUMemoryAllocator } from '../../../tensor/gpu/memory';
import { Input, Operation } from '../operation';
// Optional static shape and size information used to specialize the compiled shader
export interface NormalizeOpInfo {
  shapeX?: readonly number[];
  widthX?: number;
  heightX?: number;
  shapeMean?: readonly number[];
  widthMean?: number;
  heightMean?: number;
  shapeVariance?: readonly number[];
  widthVariance?: number;
  heightVariance?: number;
  shapeScale?: readonly number[];
  widthScale?: number;
  heightScale?: number;
  shapeBias?: readonly number[];
  widthBias?: number;
  heightBias?: number;
  shapeOutput?: readonly number[];
  widthOutput?: number;
  heightOutput?: number;
  epsilon?: number;
}
// Runtime inputs to the normalize operation
export interface NormalizeOpInput {
  X: GPUTensorI;
  Mean: GPUTensorI;
  Variance: GPUTensorI;
  epsilon: number;
  Scale: GPUTensorI;
  Bias: GPUTensorI;
}
export declare class NormalizeOperation<GPUTensor extends GPUTensorI> extends Operation<GPUTensor, NormalizeOpInfo, NormalizeOpInput> {
  constructor(tensorConstructor: GPUTensorConstructor<GPUTensor>, dtype: DTypeGpu, allocator?: GPUMemoryAllocator);
  getVariables(): string;
  getUniformAttrs(): Input[];
  // Generates the fragment shader source, specialized with the given info
  getFragmentShader(info: NormalizeOpInfo): string;
  getOutputShape(input: NormalizeOpInput): readonly number[];
  getTextureNames(): string[];
  // Runs the operation on the given inputs and returns the output tensor
  calc(input: NormalizeOpInput): GPUTensor;
  // Compiles the shader ahead of time for the given specialization info
  compile(info: NormalizeOpInfo): void;
  // Derives specialization info (shapes, sizes, epsilon) from concrete inputs
  getCompilationInfo(input: NormalizeOpInput): NormalizeOpInfo;
  getInputInfoString(input: NormalizeOpInput): string;
}
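
Given the input names (Mean, Variance, Scale, Bias) and the epsilon term, this operation appears to implement the standard normalization formula y = Scale * (X - Mean) / sqrt(Variance + epsilon) + Bias. The following CPU sketch on plain arrays is for illustration only and is not part of the library; it assumes all inputs have already been broadcast to the same length.

// Illustrative CPU reference for the normalization formula (not library code).
// Assumes x, mean, variance, scale and bias all have the same length.
function normalizeReference(
  x: Float32Array,
  mean: Float32Array,
  variance: Float32Array,
  scale: Float32Array,
  bias: Float32Array,
  epsilon: number
): Float32Array {
  const out = new Float32Array(x.length);
  for (let i = 0; i < x.length; i++) {
    out[i] = (scale[i] * (x[i] - mean[i])) / Math.sqrt(variance[i] + epsilon) + bias[i];
  }
  return out;
}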
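
A minimal usage sketch based only on the declarations above: how GPU tensors, the tensor constructor, the DTypeGpu value, and the allocator are obtained is library-specific and assumed to be provided by the caller, and the import path for NormalizeOperation is hypothetical.

import { DTypeGpu, GPUTensorConstructor, GPUTensorI } from '../../../tensor/gpu/interface';
import { GPUMemoryAllocator } from '../../../tensor/gpu/memory';
// Hypothetical path; adjust to wherever NormalizeOperation lives in the package.
import { NormalizeOperation, NormalizeOpInput } from './normalize';

function runNormalize<T extends GPUTensorI>(
  tensorConstructor: GPUTensorConstructor<T>,
  dtype: DTypeGpu,
  inputs: NormalizeOpInput,
  allocator?: GPUMemoryAllocator
): T {
  const op = new NormalizeOperation<T>(tensorConstructor, dtype, allocator);
  // The shader can presumably be pre-specialized for these shapes before running:
  // op.compile(op.getCompilationInfo(inputs));
  // calc() executes the operation and returns the output tensor.
  return op.calc(inputs);
}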