@tensorflow-models/coco-ssd
Object detection model (coco-ssd) in TensorFlow.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
import { BackendTimingInfo, DataMover, KernelBackend } from './kernels/backend';
import { DataId, Tensor, Tensor3D, TensorTracker, Variable } from './tensor';
import { NamedTensorMap, NamedVariableMap, TensorContainer } from './tensor_types';
import { DataValues } from './types';
/**
 * A function that computes an output. The `save` function is for saving
 * tensors computed in the forward pass that are needed in the backward pass.
 */
export declare type ForwardFunc<T> = (backend: KernelBackend, save?: <S extends Tensor>(tensor: S) => S) => T;
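/*
 * Illustrative sketch (not part of this declaration file): a ForwardFunc is
 * what `Engine.runKernel` invokes. It computes the result on the given
 * backend and may call `save` to stash tensors needed by the backward pass.
 * The tensor `x` and the backend method `square` are assumptions here.
 *
 *   const forward: ForwardFunc<Tensor> = (backend, save) => {
 *     if (save != null) {
 *       save(x);                 // keep `x` around for the gradient
 *     }
 *     return backend.square(x);  // forward computation on the active backend
 *   };
 */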
/**
 * @docalias (a: Tensor, b: Tensor, ...) =>
 *   {value: Tensor, gradFunc: (dy: Tensor) => Tensor | Tensor[]}
 */
export declare type CustomGradientFunc<T extends Tensor> = (...args: Tensor[]) => {
value: T;
gradFunc: (dy: T) => Tensor | Tensor[];
};
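/*
 * Sketch of a CustomGradientFunc (illustrative, assuming the chainable
 * Tensor ops `square`, `add`, and `mul` from the public API): it returns the
 * forward value together with a gradFunc that maps the upstream gradient
 * `dy` to the gradient w.r.t. the input. Such a function is passed to
 * `Engine.customGrad` below, which wraps it into an ordinary
 * tensor-in/tensor-out function.
 *
 *   const squareWithGrad: CustomGradientFunc<Tensor> = (x: Tensor) => ({
 *     value: x.square(),
 *     gradFunc: (dy: Tensor) => dy.mul(x.add(x)),  // d/dx x^2 = 2x
 *   });
 */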
export declare type MemoryInfo = {
numTensors: number;
numDataBuffers: number;
numBytes: number;
unreliable?: boolean;
reasons: string[];
};
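/*
 * Reading memory stats (sketch; `engine` is an assumed Engine instance):
 *
 *   const info: MemoryInfo = engine.memory();
 *   console.log(`${info.numTensors} tensors in ${info.numBytes} bytes`);
 *   if (info.unreliable) {
 *     console.warn(info.reasons.join('; '));  // why the numbers may be off
 *   }
 */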
declare type KernelProfile = {
name: string;
bytesAdded: number;
totalBytesSnapshot: number;
tensorsAdded: number;
totalTensorsSnapshot: number;
inputShapes: number[][];
outputShape: number[] | number[][];
};
export declare type ProfileInfo = {
newBytes: number;
newTensors: number;
peakBytes: number;
kernels: KernelProfile[];
result: TensorContainer;
};
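/*
 * Profiling sketch (illustrative; `engine` and `x` are assumptions):
 * `Engine.profile` runs the query and reports per-kernel allocations
 * alongside the query's result.
 *
 *   const p: ProfileInfo = await engine.profile(() => x.square().sum());
 *   console.log(`peak ${p.peakBytes} bytes across ${p.kernels.length} kernels`);
 */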
export interface TimingInfo extends BackendTimingInfo {
wallMs: number;
}
/** @docalias Function */
export declare type ScopeFn<T extends TensorContainer> = () => T;
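/*
 * A ScopeFn is the body handed to `Engine.tidy` below: tensors allocated
 * inside it are disposed when the scope ends, except those in the returned
 * container. Sketch, assuming `engine` is an Engine and `a` a Tensor in scope:
 *
 *   const norm = engine.tidy('l2-norm', () => a.square().sum().sqrt());
 */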
export interface TensorManager {
registerTensor(a: Tensor, backend?: KernelBackend): void;
registerVariable(v: Variable): void;
disposeTensor(a: Tensor): void;
memory(): {
numDataBuffers: number;
numBytes: number;
};
}
export declare class Engine implements TensorManager, TensorTracker, DataMover {
backend: KernelBackend;
safeMode: boolean;
private debugMode;
registeredVariables: NamedVariableMap;
private nextTapeNodeId;
private numBytes;
private numTensors;
private numStringTensors;
private numDataBuffers;
private profiling;
private activeProfile;
private activeTape;
private gradientScopeCount;
private customGradientDepth;
private activeScope;
private scopeStack;
private keepTensors;
private profiler;
private tensorInfo;
constructor(backend: KernelBackend, safeMode: boolean, debugMode: () => boolean);
moveData(dataId: DataId): void;
tidy<T extends TensorContainer>(nameOrFn: string | ScopeFn<T>, fn?: ScopeFn<T>, gradMode?: boolean): T;
private scopedRun;
private static nextTensorId;
nextTensorId(): number;
private static nextVariableId;
nextVariableId(): number;
runKernel<T extends Tensor | Tensor[], I extends NamedTensorMap>(forwardFunc: ForwardFunc<T>, inputs: I, backwardsFunc?: (dy: T, saved: Tensor[]) => {
[P in keyof I]: () => I[P];
}): T;
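/*
 * runKernel sketch (illustrative; `x` and `backend.square` are assumptions):
 * the forward func computes on the backend and saves `x`; the backwards func
 * returns, for each named input, a closure producing its gradient from `dy`
 * and the saved tensors.
 *
 *   const y = engine.runKernel(
 *       (backend, save) => backend.square(save(x)),  // `save` is supplied
 *       {x},                                         // when a backwardsFunc
 *       (dy, saved) => ({                            // is given
 *         x: () => dy.mul(saved[0].add(saved[0])),   // dy * 2x
 *       }));
 */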
registerTensor(a: Tensor | Variable, backend?: KernelBackend): void;
registerVariable(v: Variable): void;
disposeTensor(a: Tensor): void;
disposeVariables(): void;
memory(): MemoryInfo;
profile(query: () => TensorContainer): Promise<ProfileInfo>;
private shouldRecord;
private addTapeNode;
keep<T extends Tensor>(result: T): T;
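/*
 * keep sketch: marks a tensor created inside a scope so it survives the
 * scope's disposal even though it is not the returned result. `a` is an
 * assumed Tensor.
 *
 *   let kept: Tensor;
 *   engine.tidy(() => {
 *     kept = engine.keep(a.square());  // not disposed at scope end
 *     return a.sum();                  // only this result escapes otherwise
 *   });
 */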
/**
 * Start a scope. Use this with endScope() to achieve the same functionality
 * as tidy() without the need for a function closure.
 */
startScope(name?: string, gradientsMode?: boolean): void;
/**
 * End a scope. Use this with startScope() to achieve the same functionality
 * as tidy() without the need for a function closure.
 */
endScope(result?: TensorContainer, gradientsMode?: boolean): void;
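/*
 * startScope/endScope sketch: the closure-free equivalent of tidy().
 *
 *   engine.startScope('manual');
 *   const y = a.square().sum();  // intermediates are tracked in the scope
 *   engine.endScope(y);          // disposes intermediates, keeps `y`
 */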
/**
 * Returns gradients of `f` with respect to each of the `xs`. The returned
 * gradients are the same length as `xs`, but some entries may be null if
 * `f` was not a function of that `x`. An optional `dy` can be passed to
 * multiply the gradient; it defaults to `1`.
 */
gradients<T extends Tensor>(f: () => T, xs: Tensor[], dy?: T, allowNoGradients?: boolean): {
value: T;
grads: Tensor[];
};
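/*
 * gradients sketch (assuming the chainable `mul`/`sum` ops and Tensors
 * `a`, `b` in scope): differentiate the result of `f` w.r.t. each listed
 * tensor.
 *
 *   const {value, grads} = engine.gradients(() => a.mul(b).sum(), [a, b]);
 *   // grads[0] is d(sum(a*b))/da, i.e. b; grads[1] is a
 */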
customGrad<T extends Tensor>(f: CustomGradientFunc<T>): (...args: Tensor[]) => T;
write(dataId: DataId, values: DataValues): void;
readSync(dataId: DataId): DataValues;
read(dataId: DataId): Promise<DataValues>;
fromPixels(pixels: ImageData | HTMLImageElement | HTMLCanvasElement | HTMLVideoElement, numChannels: number): Tensor3D;
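/*
 * fromPixels sketch: converts an image-like element into an int32 Tensor3D
 * of shape [height, width, numChannels]. The element id is hypothetical.
 *
 *   const img = document.getElementById('input') as HTMLImageElement;
 *   const rgb: Tensor3D = engine.fromPixels(img, 3);
 */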
time(query: () => void): Promise<TimingInfo>;
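/*
 * time sketch: measures the query's kernel and wall time. Assumes 2-D
 * tensors `a`, `b` with the chainable `matMul` op, and that `kernelMs`
 * comes from BackendTimingInfo.
 *
 *   const t: TimingInfo = await engine.time(() => { a.matMul(b); });
 *   console.log(`wall: ${t.wallMs} ms, kernel: ${t.kernelMs} ms`);
 */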
/**
* Tracks a Tensor in the current scope to be automatically cleaned up
* when the current scope ends, and returns the value.
*
* @param result The Tensor to track in the current scope.
*/
private track;
}
export {};