@vladmandic/face-api
FaceAPI: AI-powered Face Detection & Rotation Tracking, Face Description & Recognition, Age & Gender & Emotion Prediction for Browser and NodeJS using TensorFlow/JS
/// <reference path="../src/types/webgpu.d.ts" />
declare const add: typeof add_;
/**
* Adds two `tf.Tensor`s element-wise, A + B. Supports broadcasting.
*
* ```js
* const a = tf.tensor1d([1, 2, 3, 4]);
* const b = tf.tensor1d([10, 20, 30, 40]);
*
* a.add(b).print(); // or tf.add(a, b)
* ```
*
* ```js
* // Broadcast add a with b.
* const a = tf.scalar(5);
* const b = tf.tensor1d([10, 20, 30, 40]);
*
* a.add(b).print(); // or tf.add(a, b)
* ```
* @param a The first `tf.Tensor` to add.
* @param b The second `tf.Tensor` to add. Must have the same type as `a`.
*
* @doc {heading: 'Operations', subheading: 'Arithmetic'}
*/
declare function add_<T extends Tensor>(a: Tensor | TensorLike, b: Tensor | TensorLike): T;
export declare type AgeAndGenderPrediction = {
age: number;
gender: Gender;
genderProbability: number;
};
export declare class AgeGenderNet extends NeuralNetwork<NetParams> {
private _faceFeatureExtractor;
constructor(faceFeatureExtractor?: TinyXception);
get faceFeatureExtractor(): TinyXception;
runNet(input: NetInput | tf.Tensor4D): NetOutput;
forwardInput(input: NetInput | tf.Tensor4D): NetOutput;
forward(input: TNetInput): Promise<NetOutput>;
predictAgeAndGender(input: TNetInput): Promise<AgeAndGenderPrediction | AgeAndGenderPrediction[]>;
protected getDefaultModelName(): string;
dispose(throwOnRedispose?: boolean): void;
loadClassifierParams(weights: Float32Array): void;
extractClassifierParams(weights: Float32Array): {
params: NetParams;
paramMappings: ParamMapping[];
};
protected extractParamsFromWeightMap(weightMap: tf.NamedTensorMap): {
params: NetParams;
paramMappings: ParamMapping[];
};
protected extractParams(weights: Float32Array): {
params: NetParams;
paramMappings: ParamMapping[];
};
}
export declare const allFaces: typeof allFacesSsdMobilenetv1;
export declare function allFacesSsdMobilenetv1(input: TNetInput, minConfidence?: number): Promise<WithFaceDescriptor<WithFaceLandmarks<WithFaceDetection<{}>>>[]>;
export declare function allFacesTinyYolov2(input: TNetInput, forwardParams?: ITinyYolov2Options): Promise<WithFaceDescriptor<WithFaceLandmarks<WithFaceDetection<{}>>>[]>;
declare enum AnchorPosition {
TOP_LEFT = "TOP_LEFT",
TOP_RIGHT = "TOP_RIGHT",
BOTTOM_LEFT = "BOTTOM_LEFT",
BOTTOM_RIGHT = "BOTTOM_RIGHT"
}
/** @docalias number[] */
declare interface ArrayMap {
R0: number;
R1: number[];
R2: number[][];
R3: number[][][];
R4: number[][][][];
R5: number[][][][][];
R6: number[][][][][][];
}
declare const avgPool: typeof avgPool_;
/**
* Computes the 2D average pooling of an image.
*
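* A minimal usage sketch (values chosen for illustration):
*
* ```js
* const x = tf.tensor3d([1, 2, 3, 4], [2, 2, 1]);
* // 2x2 window, stride 1, 'valid' padding -> one averaged value (2.5)
* x.avgPool(2, 1, 'valid').print();
* ```
*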
* @param x The input tensor, of rank 4 or rank 3 of shape
* `[batch, height, width, inChannels]`. If rank 3, batch of 1 is assumed.
* @param filterSize The filter size: `[filterHeight, filterWidth]`. If
* `filterSize` is a single number, then `filterHeight == filterWidth`.
* @param strides The strides of the pooling: `[strideHeight, strideWidth]`. If
* `strides` is a single number, then `strideHeight == strideWidth`.
* @param pad The type of padding algorithm:
* - `same` and stride 1: output will be of same size as input,
* regardless of filter size.
* - `valid`: output will be smaller than input if filter is larger
* than 1x1.
* - For more info, see this guide:
* [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](
* https://www.tensorflow.org/api_docs/python/tf/nn/convolution)
* @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is
* provided, it will default to truncate.
*
* @doc {heading: 'Operations', subheading: 'Convolution'}
*/
declare function avgPool_<T extends Tensor3D | Tensor4D>(x: T | TensorLike, filterSize: [number, number] | number, strides: [number, number] | number, pad: 'valid' | 'same' | number | conv_util.ExplicitPadding, dimRoundingMode?: 'floor' | 'round' | 'ceil'): T;
export declare function awaitMediaLoaded(media: HTMLImageElement | HTMLVideoElement | HTMLCanvasElement): Promise<unknown>;
export declare type BatchNorm = {
sub: tf.Tensor1D;
truediv: tf.Tensor1D;
};
declare const batchNorm: typeof batchNorm_;
/**
* Batch normalization.
*
* As described in
* [http://arxiv.org/abs/1502.03167](http://arxiv.org/abs/1502.03167).
*
* Mean, variance, scale, and offset can be of two shapes:
* - The same shape as the input.
* - In the common case, the depth dimension is the last dimension of x, so
* the values would be a `tf.Tensor1D` of shape [depth].
*
* Also available are stricter rank-specific methods with the same signature
* as this method that assert that parameters passed are of given rank
* - `tf.batchNorm2d`
* - `tf.batchNorm3d`
* - `tf.batchNorm4d`
*
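* A minimal sketch, normalizing along the depth (last) dimension:
*
* ```js
* const x = tf.tensor2d([[1, 2], [3, 4]]);
* const mean = tf.tensor1d([1, 2]);
* const variance = tf.tensor1d([1, 1]);
* // offset and scale are optional
* x.batchNorm(mean, variance).print(); // or tf.batchNorm(x, mean, variance)
* ```
*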
* @param x The input Tensor.
* @param mean A mean Tensor.
* @param variance A variance Tensor.
* @param offset An offset Tensor.
* @param scale A scale Tensor.
* @param varianceEpsilon A small float number to avoid dividing by 0.
*
* @doc {heading: 'Operations', subheading: 'Normalization'}
*/
declare function batchNorm_<R extends Rank>(x: Tensor<R> | TensorLike, mean: Tensor<R> | Tensor1D | TensorLike, variance: Tensor<R> | Tensor1D | TensorLike, offset?: Tensor<R> | Tensor1D | TensorLike, scale?: Tensor<R> | Tensor1D | TensorLike, varianceEpsilon?: number): Tensor<R>;
export declare class BoundingBox extends Box implements IBoundingBox {
constructor(left: number, top: number, right: number, bottom: number, allowNegativeDimensions?: boolean);
}
export declare class Box<BoxType = any> implements IBoundingBox, IRect {
static isRect(rect: any): boolean;
static assertIsValidBox(box: any, callee: string, allowNegativeDimensions?: boolean): void;
private _x;
private _y;
private _width;
private _height;
constructor(_box: IBoundingBox | IRect, allowNegativeDimensions?: boolean);
get x(): number;
get y(): number;
get width(): number;
get height(): number;
get left(): number;
get top(): number;
get right(): number;
get bottom(): number;
get area(): number;
get topLeft(): Point;
get topRight(): Point;
get bottomLeft(): Point;
get bottomRight(): Point;
round(): Box<BoxType>;
floor(): Box<BoxType>;
toSquare(): Box<BoxType>;
rescale(s: IDimensions | number): Box<BoxType>;
pad(padX: number, padY: number): Box<BoxType>;
clipAtImageBorders(imgWidth: number, imgHeight: number): Box<BoxType>;
shift(sx: number, sy: number): Box<BoxType>;
padAtBorders(imageHeight: number, imageWidth: number): {
dy: number;
edy: number;
dx: number;
edx: number;
y: number;
ey: number;
x: number;
ex: number;
w: number;
h: number;
};
calibrate(region: Box): Box<any>;
}
declare type BoxPredictionParams = {
box_encoding_predictor: ConvParams;
class_predictor: ConvParams;
};
declare namespace browser {
export {
fromPixelsAsync,
toPixels,
draw_2 as draw,
fromPixels
}
}
/**
* Creates an IOHandler that loads model artifacts from user-selected files.
*
* This method can be used for loading from files such as user-selected files
* in the browser.
* When used in conjunction with `tf.loadLayersModel`, an instance of
* `tf.LayersModel` (Keras-style) can be constructed from the loaded artifacts.
*
* ```js
* // Note: This code snippet won't run properly without the actual file input
* // elements in the HTML DOM.
*
* // Suppose there are two HTML file input (`<input type="file" ...>`)
* // elements.
* const uploadJSONInput = document.getElementById('upload-json');
* const uploadWeightsInput = document.getElementById('upload-weights');
* const model = await tf.loadLayersModel(tf.io.browserFiles(
* [uploadJSONInput.files[0], uploadWeightsInput.files[0]]));
* ```
*
* @param files `File`s to load from. Currently, this function supports only
* loading from files that contain Keras-style models (i.e., `tf.Model`s), for
* which an `Array` of `File`s is expected (in that order):
* - A JSON file containing the model topology and weight manifest.
* - Optionally, one or more binary files containing the binary weights.
* These files must have names that match the paths in the `weightsManifest`
* contained by the aforementioned JSON file, or errors will be thrown
* during loading. These weights files have the same format as the ones
* generated by `tensorflowjs_converter` that comes with the `tensorflowjs`
* Python PIP package. If no weights files are provided, only the model
* topology will be loaded from the JSON file above.
* @returns An instance of `Files` `IOHandler`.
*
* @doc {
* heading: 'Models',
* subheading: 'Loading',
* namespace: 'io',
* ignoreCI: true
* }
*/
declare function browserFiles(files: File[]): IOHandler;
/**
* Deprecated. Use `tf.io.http`.
* @param path
* @param loadOptions
*/
declare function browserHTTPRequest(path: string, loadOptions?: LoadOptions): IOHandler;
export declare function bufferToImage(buf: Blob): Promise<HTMLImageElement>;
declare const cast: typeof cast_;
/**
* Casts a `tf.Tensor` to a new dtype.
*
* ```js
* const x = tf.tensor1d([1.5, 2.5, 3]);
* tf.cast(x, 'int32').print();
* ```
* @param x The input tensor to be casted.
* @param dtype The dtype to cast the input tensor to.
*
* @doc {heading: 'Tensors', subheading: 'Transformations'}
*/
declare function cast_<T extends Tensor>(x: T | TensorLike, dtype: DataType): T;
/**
* Check validity of pad when using dimRoundingMode.
* @param opDesc A string of op description
* @param pad The type of padding algorithm.
* - `same` and stride 1: output will be of same size as input,
* regardless of filter size.
* - `valid`: output will be smaller than input if filter is larger
* than 1x1.
* - For more info, see this guide:
* [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](
* https://www.tensorflow.org/api_docs/python/tf/nn/convolution)
* @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is
* provided, it will default to truncate.
* @throws unknown padding parameter
*/
declare function checkPadOnDimRoundingMode(opDesc: string, pad: 'valid' | 'same' | number | ExplicitPadding, dimRoundingMode?: 'floor' | 'round' | 'ceil'): void;
declare const clipByValue: typeof clipByValue_;
/**
* Clips values element-wise. `max(min(x, clipValueMax), clipValueMin)`
*
* ```js
* const x = tf.tensor1d([-1, 2, -3, 4]);
*
* x.clipByValue(-2, 3).print(); // or tf.clipByValue(x, -2, 3)
* ```
* @param x The input tensor.
* @param clipValueMin Lower bound of range to be clipped to.
* @param clipValueMax Upper bound of range to be clipped to.
*
* @doc {heading: 'Operations', subheading: 'Basic math'}
*/
declare function clipByValue_<T extends Tensor>(x: T | TensorLike, clipValueMin: number, clipValueMax: number): T;
export declare class ComposableTask<T> {
then(onfulfilled: (value: T) => T | PromiseLike<T>): Promise<T>;
run(): Promise<T>;
}
/**
* Wraps a list of ArrayBuffers into a `slice()`-able object without allocating
* a large ArrayBuffer.
*
* Allocating large ArrayBuffers (~2GB) can be unstable on Chrome. TFJS loads
* its weights as a list of (usually) 4MB ArrayBuffers and then slices the
* weight tensors out of them. For small models, it's safe to concatenate all
* the weight buffers into a single ArrayBuffer and then slice the weight
* tensors out of it, but for large models, a different approach is needed.
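*
* A usage sketch (note this class is internal to `tf.io`; shown for
* illustration only):
*
* ```js
* const a = new Uint8Array([1, 2, 3]).buffer;
* const b = new Uint8Array([4, 5, 6]).buffer;
* const composite = new CompositeArrayBuffer([a, b]);
* // slice across the shard boundary without concatenating up front
* const middle = new Uint8Array(composite.slice(2, 4)); // -> [3, 4]
* ```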
*/
declare class CompositeArrayBuffer {
private shards;
private previousShardIndex;
private bufferUniformSize?;
readonly byteLength: number;
/**
* Concatenate a number of ArrayBuffers into one.
*
* @param buffers An array of ArrayBuffers to concatenate, or a single
* ArrayBuffer.
* @returns Result of concatenating `buffers` in order.
*/
static join(buffers?: ArrayBuffer[] | ArrayBuffer): ArrayBuffer;
constructor(buffers?: ArrayBuffer | ArrayBuffer[] | TypedArray | TypedArray[]);
slice(start?: number, end?: number): ArrayBuffer;
/**
* Get the index of the shard that contains the byte at `byteIndex`.
*/
private findShardForByte;
}
export declare class ComputeAllFaceDescriptorsTask<TSource extends WithFaceLandmarks<WithFaceDetection<{}>>> extends ComputeFaceDescriptorsTaskBase<WithFaceDescriptor<TSource>[], TSource[]> {
run(): Promise<WithFaceDescriptor<TSource>[]>;
withFaceExpressions(): PredictAllFaceExpressionsWithFaceAlignmentTask<WithFaceDescriptor<TSource>>;
withAgeAndGender(): PredictAllAgeAndGenderWithFaceAlignmentTask<WithFaceDescriptor<TSource>>;
}
/**
* Computes the information for a forward pass of a convolution/pooling
* operation.
*/
declare function computeConv2DInfo(inShape: [number, number, number, number], filterShape: [number, number, number, number], strides: number | [number, number], dilations: number | [number, number], pad: 'same' | 'valid' | number | ExplicitPadding, roundingMode?: 'floor' | 'round' | 'ceil', depthwise?: boolean, dataFormat?: 'channelsFirst' | 'channelsLast'): Conv2DInfo;
/**
* Computes the information for a forward pass of a 3D convolution/pooling
* operation.
*/
declare function computeConv3DInfo(inShape: [number, number, number, number, number], filterShape: [number, number, number, number, number], strides: number | [number, number, number], dilations: number | [number, number, number], pad: 'same' | 'valid' | number, depthwise?: boolean, dataFormat?: 'channelsFirst' | 'channelsLast', roundingMode?: 'floor' | 'round' | 'ceil'): Conv3DInfo;
declare function computeDefaultPad(inputShape: [number, number] | [number, number, number, number], fieldSize: number, stride: number, dilation?: number): number;
/**
*
* @param inputShape Input tensor shape is of the following dimensions:
* `[batch, height, width, inChannels]`.
* @param filterShape The filter shape is of the following dimensions:
* `[filterHeight, filterWidth, depth]`.
* @param strides The strides of the sliding window for each dimension of the
* input tensor: `[strideHeight, strideWidth]`.
* If `strides` is a single number,
* then `strideHeight == strideWidth`.
* @param pad The type of padding algorithm.
* - `same` and stride 1: output will be of same size as input,
* regardless of filter size.
* - `valid`: output will be smaller than input if filter is larger
* than 1x1.
* - For more info, see this guide:
* [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](
* https://www.tensorflow.org/api_docs/python/tf/nn/convolution)
* @param dataFormat The data format of the input and output data.
* Defaults to 'NHWC'.
* @param dilations The dilation rates: `[dilationHeight, dilationWidth]`.
* Defaults to `[1, 1]`. If `dilations` is a single number, then
* `dilationHeight == dilationWidth`.
*/
declare function computeDilation2DInfo(inputShape: [number, number, number, number], filterShape: [number, number, number], strides: number | [number, number], pad: 'same' | 'valid' | number, dataFormat: 'NHWC', dilations: number | [number, number]): Conv2DInfo;
/**
* Computes a 128-entry vector (face descriptor / face embedding) from the face shown in an image,
* which uniquely represents the features of that person's face. The computed face descriptor can
* be used to measure the similarity between faces by computing the Euclidean distance of two
* face descriptors.
*
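* A usage sketch (assumes the face recognition model has been loaded and
* `img1`, `img2` are image elements showing aligned faces):
*
* ```js
* const d1 = await faceapi.computeFaceDescriptor(img1); // Float32Array
* const d2 = await faceapi.computeFaceDescriptor(img2);
* // a lower distance means the faces are more likely the same person
* const distance = faceapi.euclideanDistance(d1, d2);
* ```
*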
* @param inputs The face image extracted from the aligned bounding box of a face. Can
* also be an array of input images, which will be batch processed.
* @returns Face descriptor with 128 entries or array thereof in case of batch input.
*/
export declare const computeFaceDescriptor: (input: TNetInput) => Promise<Float32Array | Float32Array[]>;
export declare class ComputeFaceDescriptorsTaskBase<TReturn, TParentReturn> extends ComposableTask<TReturn> {
protected parentTask: ComposableTask<TParentReturn> | Promise<TParentReturn>;
protected input: TNetInput;
constructor(parentTask: ComposableTask<TParentReturn> | Promise<TParentReturn>, input: TNetInput);
}
declare function computePool2DInfo(inShape: [number, number, number, number], filterSize: [number, number] | number, strides: number | [number, number], dilations: number | [number, number], pad: 'same' | 'valid' | number | ExplicitPadding, roundingMode?: 'floor' | 'round' | 'ceil', dataFormat?: 'channelsFirst' | 'channelsLast'): Conv2DInfo;
/**
* Computes the information for a forward pass of a pooling3D operation.
*/
declare function computePool3DInfo(inShape: [number, number, number, number, number], filterSize: number | [number, number, number], strides: number | [number, number, number], dilations: number | [number, number, number], pad: 'same' | 'valid' | number, roundingMode?: 'floor' | 'round' | 'ceil', dataFormat?: 'NDHWC' | 'NCDHW'): Conv3DInfo;
declare function computeReshapedDimensions({ width, height }: IDimensions, inputSize: number): Dimensions;
export declare class ComputeSingleFaceDescriptorTask<TSource extends WithFaceLandmarks<WithFaceDetection<{}>>> extends ComputeFaceDescriptorsTaskBase<WithFaceDescriptor<TSource> | undefined, TSource | undefined> {
run(): Promise<WithFaceDescriptor<TSource> | undefined>;
withFaceExpressions(): PredictSingleFaceExpressionsWithFaceAlignmentTask<WithFaceDescriptor<TSource>>;
withAgeAndGender(): PredictSingleAgeAndGenderWithFaceAlignmentTask<WithFaceDescriptor<TSource>>;
}
declare const concat: typeof concat_;
/**
* Concatenates a list of `tf.Tensor`s along a given axis.
*
* The tensors ranks and types must match, and their sizes must match in all
* dimensions except `axis`.
*
* Also available are stricter rank-specific methods that assert that
* `tensors` are of the given rank:
* - `tf.concat1d`
* - `tf.concat2d`
* - `tf.concat3d`
* - `tf.concat4d`
*
* Except `tf.concat1d` (which does not have an axis param), all methods have
* the same signature as this method.
*
* ```js
* const a = tf.tensor1d([1, 2]);
* const b = tf.tensor1d([3, 4]);
* a.concat(b).print(); // or tf.concat([a, b])
* ```
*
* ```js
* const a = tf.tensor1d([1, 2]);
* const b = tf.tensor1d([3, 4]);
* const c = tf.tensor1d([5, 6]);
* tf.concat([a, b, c]).print();
* ```
*
* ```js
* const a = tf.tensor2d([[1, 2], [10, 20]]);
* const b = tf.tensor2d([[3, 4], [30, 40]]);
* const axis = 1;
* tf.concat([a, b], axis).print();
* ```
* @param tensors A list of tensors to concatenate.
* @param axis The axis to concatenate along. Defaults to 0 (the first dim).
*
* @doc {heading: 'Tensors', subheading: 'Slicing and Joining'}
*/
declare function concat_<T extends Tensor>(tensors: Array<T | TensorLike>, axis?: number): T;
/**
* Concatenate a number of ArrayBuffers into one.
*
* @param buffers An array of ArrayBuffers to concatenate, or a single
* ArrayBuffer.
* @returns Result of concatenating `buffers` in order.
*
* @deprecated Use tf.io.CompositeArrayBuffer.join() instead.
*/
declare function concatenateArrayBuffers(buffers: ArrayBuffer[] | ArrayBuffer): ArrayBuffer;
declare interface ContextOptions {
/**
* Optional. Has no effect if the canvas has already created a context.
* If not set, the context type is chosen based on the current backend.
*/
contextType?: string;
/**
* Optional. A WebGLContextAttributes configuration. Has no effect if the
* canvas has already created a context.
*/
contextAttributes?: WebGLContextAttributes;
}
declare const conv2d: typeof conv2d_;
/**
* Computes a 2D convolution over the input x.
*
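* A minimal sketch (shapes chosen for illustration):
*
* ```js
* const x = tf.ones([1, 8, 8, 3]); // a single 8x8 input with 3 channels
* const filter = tf.ones([3, 3, 3, 4]); // 3x3 kernel, 3 in, 4 out channels
* tf.conv2d(x, filter, 1, 'same').print(); // output shape [1, 8, 8, 4]
* ```
*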
* @param x The input tensor, of rank 4 or rank 3, of shape
* `[batch, height, width, inChannels]`. If rank 3, batch of 1 is
* assumed.
* @param filter The filter, rank 4, of shape
* `[filterHeight, filterWidth, inDepth, outDepth]`.
* @param strides The strides of the convolution: `[strideHeight,
* strideWidth]`.
* @param pad The type of padding algorithm.
* - `same` and stride 1: output will be of same size as input,
* regardless of filter size.
* - `valid`: output will be smaller than input if filter is larger
* than 1x1.
* - For more info, see this guide:
* [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](
* https://www.tensorflow.org/api_docs/python/tf/nn/convolution)
* @param dataFormat An optional string from: "NHWC", "NCHW". Defaults to
* "NHWC". Specify the data format of the input and output data. With the
* default format "NHWC", the data is stored in the order of: [batch,
* height, width, channels].
* @param dilations The dilation rates: `[dilationHeight, dilationWidth]`
* in which we sample input values across the height and width dimensions
* in atrous convolution. Defaults to `[1, 1]`. If `dilations` is a single
* number, then `dilationHeight == dilationWidth`. If it is greater than
* 1, then all values of `strides` must be 1.
* @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is
* provided, it will default to truncate.
*
* @doc {heading: 'Operations', subheading: 'Convolution'}
*/
declare function conv2d_<T extends Tensor3D | Tensor4D>(x: T | TensorLike, filter: Tensor4D | TensorLike, strides: [number, number] | number, pad: 'valid' | 'same' | number | conv_util.ExplicitPadding, dataFormat?: 'NHWC' | 'NCHW', dilations?: [number, number] | number, dimRoundingMode?: 'floor' | 'round' | 'ceil'): T;
/**
* Information about the forward pass of a convolution/pooling operation.
* It includes input and output shape, strides, filter size and padding
* information.
*/
declare type Conv2DInfo = {
batchSize: number;
inHeight: number;
inWidth: number;
inChannels: number;
outHeight: number;
outWidth: number;
outChannels: number;
dataFormat: 'channelsFirst' | 'channelsLast';
strideHeight: number;
strideWidth: number;
dilationHeight: number;
dilationWidth: number;
filterHeight: number;
filterWidth: number;
effectiveFilterHeight: number;
effectiveFilterWidth: number;
padInfo: PadInfo;
inShape: [number, number, number, number];
outShape: [number, number, number, number];
filterShape: [number, number, number, number];
};
/**
* Information about the forward pass of a 3D convolution/pooling operation.
* It includes input and output shape, strides, filter size and padding
* information.
*/
declare type Conv3DInfo = {
batchSize: number;
inDepth: number;
inHeight: number;
inWidth: number;
inChannels: number;
outDepth: number;
outHeight: number;
outWidth: number;
outChannels: number;
dataFormat: 'channelsFirst' | 'channelsLast';
strideDepth: number;
strideHeight: number;
strideWidth: number;
dilationDepth: number;
dilationHeight: number;
dilationWidth: number;
filterDepth: number;
filterHeight: number;
filterWidth: number;
effectiveFilterDepth: number;
effectiveFilterHeight: number;
effectiveFilterWidth: number;
padInfo: PadInfo3D;
inShape: [number, number, number, number, number];
outShape: [number, number, number, number, number];
filterShape: [number, number, number, number, number];
};
declare namespace conv_util {
export {
computeDilation2DInfo,
computePool2DInfo,
computePool3DInfo,
computeConv2DInfo,
computeConv3DInfo,
computeDefaultPad,
tupleValuesAreOne,
eitherStridesOrDilationsAreOne,
stridesOrDilationsArePositive,
convertConv2DDataFormat,
checkPadOnDimRoundingMode,
ExplicitPadding,
PadInfo,
PadInfo3D,
Conv2DInfo,
Conv3DInfo
}
}
/**
* Convert Conv2D dataFormat from 'NHWC'|'NCHW' to
* 'channelsLast'|'channelsFirst'
* @param dataFormat in 'NHWC'|'NCHW' mode
* @return dataFormat in 'channelsLast'|'channelsFirst' mode
* @throws unknown dataFormat
*/
declare function convertConv2DDataFormat(dataFormat: 'NHWC' | 'NCHW'): 'channelsLast' | 'channelsFirst';
declare type ConvLayerParams = {
conv: ConvParams;
scale: ScaleLayerParams;
};
declare type ConvParams = {
filters: tf.Tensor4D;
bias: tf.Tensor1D;
};
export declare type ConvWithBatchNorm = {
conv: ConvParams;
bn: BatchNorm;
};
/**
* Copy a model from one URL to another.
*
* This function supports:
*
* 1. Copying within a storage medium, e.g.,
* `tf.io.copyModel('localstorage://model-1', 'localstorage://model-2')`
* 2. Copying between two storage mediums, e.g.,
* `tf.io.copyModel('localstorage://model-1', 'indexeddb://model-1')`
*
* ```js
* // First create and save a model.
* const model = tf.sequential();
* model.add(tf.layers.dense(
* {units: 1, inputShape: [10], activation: 'sigmoid'}));
* await model.save('localstorage://demo/management/model1');
*
* // Then list existing models.
* console.log(JSON.stringify(await tf.io.listModels()));
*
* // Copy the model, from Local Storage to IndexedDB.
* await tf.io.copyModel(
* 'localstorage://demo/management/model1',
* 'indexeddb://demo/management/model1');
*
* // List models again.
* console.log(JSON.stringify(await tf.io.listModels()));
*
* // Remove both models.
* await tf.io.removeModel('localstorage://demo/management/model1');
* await tf.io.removeModel('indexeddb://demo/management/model1');
* ```
*
* @param sourceURL Source URL of copying.
* @param destURL Destination URL of copying.
* @returns ModelArtifactsInfo of the copied model (if and only if copying
* is successful).
* @throws Error if copying fails, e.g., if no model exists at `sourceURL`, or
* if `sourceURL` and `destURL` are identical.
*
* @doc {
* heading: 'Models',
* subheading: 'Management',
* namespace: 'io',
* ignoreCI: true
* }
*/
declare function copyModel(sourceURL: string, destURL: string): Promise<ModelArtifactsInfo>;
declare function createBrowserEnv(): Environment;
export declare function createCanvas({ width, height }: IDimensions): HTMLCanvasElement;
export declare function createCanvasFromMedia(media: HTMLImageElement | HTMLVideoElement | ImageData, dims?: IDimensions): HTMLCanvasElement;
export declare function createFaceDetectionNet(weights: Float32Array): SsdMobilenetv1;
export declare function createFaceRecognitionNet(weights: Float32Array): FaceRecognitionNet;
declare function createFileSystem(fs?: any): FileSystem_2;
declare function createNodejsEnv(): Environment;
export declare function createSsdMobilenetv1(weights: Float32Array): SsdMobilenetv1;
export declare function createTinyFaceDetector(weights: Float32Array): TinyFaceDetector;
export declare function createTinyYolov2(weights: Float32Array, withSeparableConvs?: boolean): TinyYolov2;
/**
* We wrap data id since we use weak map to avoid memory leaks.
* Since we have our own memory management, we have a reference counter
* mapping a tensor to its data, so there is always a pointer (even if that
* data is otherwise garbage collectable).
* See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/
* Global_Objects/WeakMap
*/
declare type DataId = object;
declare type DataToGPUOptions = DataToGPUWebGLOption;
declare interface DataToGPUWebGLOption {
customTexShape?: [number, number];
}
/** @docalias 'float32'|'int32'|'bool'|'complex64'|'string' */
declare type DataType = keyof DataTypeMap;
declare interface DataTypeMap {
float32: Float32Array;
int32: Int32Array;
bool: Uint8Array;
complex64: Float32Array;
string: string[];
}
/**
* Decode flat ArrayBuffer as weights.
*
* This function does not handle sharding.
*
* This function is the reverse of `encodeWeights`.
*
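* A round-trip sketch using the public `tf.io` API:
*
* ```js
* const {data, specs} = await tf.io.encodeWeights({w: tf.tensor1d([1, 2, 3])});
* const tensors = tf.io.decodeWeights(data, specs);
* tensors['w'].print(); // -> [1, 2, 3]
* ```
*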
* @param weightData A flat ArrayBuffer or an array of ArrayBuffers carrying the
* binary values of the tensors concatenated in the order specified in
* `specs`.
* @param specs Specifications of the names, dtypes and shapes of the tensors
* whose value are encoded by `buffer`.
* @return A map from tensor name to tensor value, with the names corresponding
* to names in `specs`.
* @throws Error, if any of the tensors has unsupported dtype.
*/
declare function decodeWeights(weightData: WeightData, specs: WeightsManifestEntry[]): NamedTensorMap;
declare function decodeWeightsStream(weightStream: ReadableStream<ArrayBuffer>, specs: WeightsManifestEntry[]): Promise<NamedTensorMap>;
export declare type DefaultTinyYolov2NetParams = {
conv0: ConvWithBatchNorm;
conv1: ConvWithBatchNorm;
conv2: ConvWithBatchNorm;
conv3: ConvWithBatchNorm;
conv4: ConvWithBatchNorm;
conv5: ConvWithBatchNorm;
conv6: ConvWithBatchNorm;
conv7: ConvWithBatchNorm;
conv8: ConvParams;
};
declare type DenseBlock3Params = {
conv0: SeparableConvParams | ConvParams;
conv1: SeparableConvParams;
conv2: SeparableConvParams;
};
declare type DenseBlock4Params = DenseBlock3Params & {
conv3: SeparableConvParams;
};
declare const depthwiseConv2d: typeof depthwiseConv2d_;
/**
* Depthwise 2D convolution.
*
* Given a 4D `input` array and a `filter` array of shape
* `[filterHeight, filterWidth, inChannels, channelMultiplier]` containing
* `inChannels` convolutional filters of depth 1, this op applies a
* different filter to each input channel (expanding from 1 channel to
* `channelMultiplier` channels for each), then concatenates the results
* together. The output has `inChannels * channelMultiplier` channels.
*
* See
* [https://www.tensorflow.org/api_docs/python/tf/nn/depthwise_conv2d](
* https://www.tensorflow.org/api_docs/python/tf/nn/depthwise_conv2d)
* for more details.
*
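* A minimal sketch (shapes chosen for illustration):
*
* ```js
* const x = tf.ones([1, 4, 4, 2]); // 2 input channels
* const filter = tf.ones([2, 2, 2, 1]); // channelMultiplier of 1
* tf.depthwiseConv2d(x, filter, 1, 'valid').print(); // shape [1, 3, 3, 2]
* ```
*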
* @param x The input tensor, of rank 4 or rank 3, of shape
* `[batch, height, width, inChannels]`. If rank 3, batch of 1 is
* assumed.
* @param filter The filter tensor, rank 4, of shape
* `[filterHeight, filterWidth, inChannels, channelMultiplier]`.
* @param strides The strides of the convolution: `[strideHeight,
* strideWidth]`. If strides is a single number, then `strideHeight ==
* strideWidth`.
* @param pad The type of padding algorithm.
* - `same` and stride 1: output will be of same size as input,
* regardless of filter size.
* - `valid`: output will be smaller than input if filter is larger
* than 1x1.
* - For more info, see this guide:
* [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](
* https://www.tensorflow.org/api_docs/python/tf/nn/convolution)
* @param dilations The dilation rates: `[dilationHeight, dilationWidth]`
* in which we sample input values across the height and width dimensions
* in atrous convolution. Defaults to `[1, 1]`. If `dilations` is a single
* number, then `dilationHeight == dilationWidth`. If it is greater than
* 1, then all values of `strides` must be 1.
* @param dataFormat An optional string from: "NHWC", "NCHW". Defaults to
* "NHWC". Specify the data format of the input and output data. With the
* default format "NHWC", the data is stored in the order of: [batch,
* height, width, channels]. Only "NHWC" is currently supported.
* @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is
* provided, it will default to truncate.
*
* @doc {heading: 'Operations', subheading: 'Convolution'}
*/
declare function depthwiseConv2d_<T extends Tensor3D | Tensor4D>(x: T | TensorLike, filter: Tensor4D | TensorLike, strides: [number, number] | number, pad: 'valid' | 'same' | number | conv_util.ExplicitPadding, dataFormat?: 'NHWC' | 'NCHW', dilations?: [number, number] | number, dimRoundingMode?: 'floor' | 'round' | 'ceil'): T;
export declare class DetectAllFaceLandmarksTask<TSource extends WithFaceDetection<{}>> extends DetectFaceLandmarksTaskBase<WithFaceLandmarks<TSource>[], TSource[]> {
run(): Promise<WithFaceLandmarks<TSource>[]>;
withFaceExpressions(): PredictAllFaceExpressionsWithFaceAlignmentTask<WithFaceLandmarks<TSource>>;
withAgeAndGender(): PredictAllAgeAndGenderWithFaceAlignmentTask<WithFaceLandmarks<TSource>>;
withFaceDescriptors(): ComputeAllFaceDescriptorsTask<WithFaceLandmarks<TSource>>;
}
export declare function detectAllFaces(input: TNetInput, options?: FaceDetectionOptions): DetectAllFacesTask;
export declare class DetectAllFacesTask extends DetectFacesTaskBase<FaceDetection[]> {
run(): Promise<FaceDetection[]>;
private runAndExtendWithFaceDetections;
withFaceLandmarks(useTinyLandmarkNet?: boolean): DetectAllFaceLandmarksTask<{
detection: FaceDetection;
}>;
withFaceExpressions(): PredictAllFaceExpressionsTask<{
detection: FaceDetection;
}>;
withAgeAndGender(): PredictAllAgeAndGenderTask<{
detection: FaceDetection;
}>;
}
/**
* Detects the 68 point face landmark positions of the face shown in an image.
*
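* A usage sketch (assumes the landmark model has been loaded and
* `faceCanvas` holds a face region, e.g. obtained via `extractFaces`):
*
* ```js
* const landmarks = await faceapi.detectFaceLandmarks(faceCanvas);
* ```
*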
* @param inputs The face image extracted from the bounding box of a face. Can
* also be an array of input images, which will be batch processed.
* @returns 68 point face landmarks or array thereof in case of batch input.
*/
export declare const detectFaceLandmarks: (input: TNetInput) => Promise<FaceLandmarks68 | FaceLandmarks68[]>;
export declare class DetectFaceLandmarksTaskBase<TReturn, TParentReturn> extends ComposableTask<TReturn> {
protected parentTask: ComposableTask<TParentReturn> | Promise<TParentReturn>;
protected input: TNetInput;
protected useTinyLandmarkNet: boolean;
constructor(parentTask: ComposableTask<TParentReturn> | Promise<TParentReturn>, input: TNetInput, useTinyLandmarkNet: boolean);
protected get landmarkNet(): FaceLandmark68Net | FaceLandmark68TinyNet;
}
/**
* Detects the 68 point face landmark positions of the face shown in an image
* using a tinier version of the 68 point face landmark model, which is slightly
* faster at inference, but also slightly less accurate.
*
* @param inputs The face image extracted from the bounding box of a face. Can
* also be an array of input images, which will be batch processed.
* @returns 68 point face landmarks or array thereof in case of batch input.
*/
export declare const detectFaceLandmarksTiny: (input: TNetInput) => Promise<FaceLandmarks68 | FaceLandmarks68[]>;
export declare class DetectFacesTaskBase<TReturn> extends ComposableTask<TReturn> {
protected input: TNetInput;
protected options: FaceDetectionOptions;
constructor(input: TNetInput, options?: FaceDetectionOptions);
}
export declare const detectLandmarks: (input: TNetInput) => Promise<FaceLandmarks68 | FaceLandmarks68[]>;
export declare function detectSingleFace(input: TNetInput, options?: FaceDetectionOptions): DetectSingleFaceTask;
export declare class DetectSingleFaceLandmarksTask<TSource extends WithFaceDetection<{}>> extends DetectFaceLandmarksTaskBase<WithFaceLandmarks<TSource> | undefined, TSource | undefined> {
run(): Promise<WithFaceLandmarks<TSource> | undefined>;
withFaceExpressions(): PredictSingleFaceExpressionsWithFaceAlignmentTask<WithFaceLandmarks<TSource>>;
withAgeAndGender(): PredictSingleAgeAndGenderWithFaceAlignmentTask<WithFaceLandmarks<TSource>>;
withFaceDescriptor(): ComputeSingleFaceDescriptorTask<WithFaceLandmarks<TSource>>;
}
export declare class DetectSingleFaceTask extends DetectFacesTaskBase<FaceDetection | undefined> {
run(): Promise<FaceDetection | undefined>;
private runAndExtendWithFaceDetection;
withFaceLandmarks(useTinyLandmarkNet?: boolean): DetectSingleFaceLandmarksTask<{
detection: FaceDetection;
}>;
withFaceExpressions(): PredictSingleFaceExpressionsTask<{
detection: FaceDetection;
}>;
withAgeAndGender(): PredictSingleAgeAndGenderTask<{
detection: FaceDetection;
}>;
}
export declare class Dimensions implements IDimensions {
private _width;
private _height;
constructor(width: number, height: number);
get width(): number;
get height(): number;
reverse(): Dimensions;
}
declare const div: typeof div_;
/**
* Divides two `tf.Tensor`s element-wise, A / B. Supports broadcasting.
*
* ```js
* const a = tf.tensor1d([1, 4, 9, 16]);
* const b = tf.tensor1d([1, 2, 3, 4]);
*
* a.div(b).print(); // or tf.div(a, b)
* ```
*
* ```js
* // Broadcast div a with b.
* const a = tf.tensor1d([2, 4, 6, 8]);
* const b = tf.scalar(2);
*
* a.div(b).print(); // or tf.div(a, b)
* ```
*
* @param a The first tensor as the numerator.
* @param b The second tensor as the denominator. Must have the same dtype as
* `a`.
*
* @doc {heading: 'Operations', subheading: 'Arithmetic'}
*/
declare function div_<T extends Tensor>(a: Tensor | TensorLike, b: Tensor | TensorLike): T;
declare namespace draw {
export {
drawContour,
drawDetections,
TDrawDetectionsInput,
drawFaceExpressions,
DrawFaceExpressionsInput,
IDrawBoxOptions,
DrawBoxOptions,
DrawBox,
drawFaceLandmarks,
IDrawFaceLandmarksOptions,
DrawFaceLandmarksOptions,
DrawFaceLandmarks,
DrawFaceLandmarksInput,
AnchorPosition,
IDrawTextFieldOptions,
DrawTextFieldOptions,
DrawTextField
}
}
export { draw }
/**
* Draws a `tf.Tensor` to a canvas.
*
* When the dtype of the input is 'float32', we assume values in the range
* [0-1]. Otherwise, when input is 'int32', we assume values in the range
* [0-255].
*
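* A minimal sketch, drawing a random image tensor to a fresh canvas:
*
* ```js
* const image = tf.randomUniform([100, 100, 3], 0, 255, 'int32');
* const canvas = document.createElement('canvas');
* tf.browser.draw(image, canvas);
* ```
*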
* @param image The tensor to draw on the canvas. Must match one of
* these shapes:
* - Rank-2 with shape `[height, width]`: Drawn as grayscale.
* - Rank-3 with shape `[height, width, 1]`: Drawn as grayscale.
* - Rank-3 with shape `[height, width, 3]`: Drawn as RGB with alpha set in
* `imageOptions` (defaults to 1, which is opaque).
* - Rank-3 with shape `[height, width, 4]`: Drawn as RGBA.
* @param canvas The canvas to draw to.
* @param options The configuration arguments for image to be drawn and the
* canvas to draw to.
*
* @doc {heading: 'Browser', namespace: 'browser'}
*/
declare function draw_2(image: Tensor2D | Tensor3D | TensorLike, canvas: HTMLCanvasElement, options?: DrawOptions): void;
declare class DrawBox {
box: Box;
options: DrawBoxOptions;
constructor(box: IBoundingBox | IRect, options?: IDrawBoxOptions);
draw(canvasArg: string | HTMLCanvasElement | CanvasRenderingContext2D): void;
}
declare class DrawBoxOptions {
boxColor: string;
lineWidth: number;
drawLabelOptions: DrawTextFieldOptions;
label?: string;
constructor(options?: IDrawBoxOptions);
}
declare function drawContour(ctx: CanvasRenderingContext2D, points: Point[], isClosed?: boolean): void;
declare function drawDetections(canvasArg: string | HTMLCanvasElement, detections: TDrawDetectionsInput | Array<TDrawDetectionsInput>): void;
declare function drawFaceExpressions(canvasArg: string | HTMLCanvasElement, faceExpressions: DrawFaceExpressionsInput | Array<DrawFaceExpressionsInput>, minConfidence?: number, textFieldAnchor?: IPoint): void;
declare type DrawFaceExpressionsInput = FaceExpressions | WithFaceExpressions<{}>;
declare class DrawFaceLandmarks {
faceLandmarks: FaceLandmarks;
options: DrawFaceLandmarksOptions;
constructor(faceLandmarks: FaceLandmarks, options?: IDrawFaceLandmarksOptions);
draw(canvasArg: string | HTMLCanvasElement | CanvasRenderingContext2D): void;
}
declare function drawFaceLandmarks(canvasArg: string | HTMLCanvasElement, faceLandmarks: DrawFaceLandmarksInput | Array<DrawFaceLandmarksInput>): void;
declare type DrawFaceLandmarksInput = FaceLandmarks | WithFaceLandmarks<WithFaceDetection<{}>>;
declare class DrawFaceLandmarksOptions {
drawLines: boolean;
drawPoints: boolean;
lineWidth: number;
pointSize: number;
lineColor: string;
pointColor: string;
constructor(options?: IDrawFaceLandmarksOptions);
}
declare interface DrawOptions {
/**
* Optional. An object of options to customize the values of image tensor.
*/
imageOptions?: ImageOptions;
/**
* Optional. An object to configure the context of the canvas to draw to.
*/
contextOptions?: ContextOptions;
}
declare class DrawTextField {
text: string[];
anchor: IPoint;
options: DrawTextFieldOptions;
constructor(text: string | string[] | DrawTextField, anchor: IPoint, options?: IDrawTextFieldOptions);
measureWidth(ctx: CanvasRenderingContext2D): number;
measureHeight(): number;
getUpperLeft(ctx: CanvasRenderingContext2D, canvasDims?: IDimensions): IPoint;
draw(canvasArg: string | HTMLCanvasElement | CanvasRenderingContext2D): void;
}
declare class DrawTextFieldOptions implements IDrawTextFieldOptions {
anchorPosition: AnchorPosition;
backgroundColor: string;
fontColor: string;
fontSize: number;
fontStyle: string;
padding: number;
constructor(options?: IDrawTextFieldOptions);
}
declare function eitherStridesOrDilationsAreOne(strides: number | number[], dilations: number | number[]): boolean;
/**
* Encode a map from names to weight values as an ArrayBuffer, along with an
* `Array` of `WeightsManifestEntry` as specification of the encoded weights.
*
* This function does not perform sharding.
*
* This function is the reverse of `decodeWeights`.
*
* @param tensors A map ("dict") from names to tensors.
* @param group Group to which the weights belong (optional).
* @returns A `Promise` of
* - A flat `ArrayBuffer` with all the binary values of the `Tensor`s
* concatenated.
* - An `Array` of `WeightManifestEntry`s, carrying information including
* tensor names, `dtype`s and shapes.
* @throws Error: on unsupported tensor `dtype`.
*/
declare function encodeWeights(tensors: NamedTensorMap | NamedTensor[], group?: WeightGroup): Promise<{
data: ArrayBuffer;
specs: WeightsManifestEntry[];
}>;
declare let ENV: Environment_2;
export declare const env: {
getEnv: typeof getEnv;
setEnv: typeof setEnv;
initialize: typeof initialize;
createBrowserEnv: typeof createBrowserEnv;
createFileSystem: typeof createFileSystem;
createNodejsEnv: typeof createNodejsEnv;
monkeyPatch: typeof monkeyPatch;
isBrowser: typeof isBrowser;
isNodejs: typeof isNodejs;
};
export declare type Environment = FileSystem_2 & {
Canvas: typeof HTMLCanvasElement;
CanvasRenderingContext2D: typeof CanvasRenderingContext2D;
Image: typeof HTMLImageElement;
ImageData: typeof ImageData;
Video: typeof HTMLVideoElement;
createCanvasElement: () => HTMLCanvasElement;
createImageElement: () => HTMLImageElement;
createVideoElement: () => HTMLVideoElement;
fetch: (url: string, init?: RequestInit) => Promise<Response>;
};
/**
* The environment contains evaluated flags as well as the registered platform.
* This is always used as a global singleton and can be retrieved with
* `tf.env()`.
*
* @doc {heading: 'Environment'}
*/
declare class Environment_2 {
global: any;
private flags;
private flagRegistry;
private urlFlags;
platformName: string;
platform: Platform;
getQueryParams: typeof getQueryParams;
constructor(global: any);
setPlatform(platformName: string, platform: Platform): void;
registerFlag(flagName: string, evaluationFn: FlagEvaluationFn, setHook?: (value: FlagValue) => void): void;
getAsync(flagName: string): Promise<FlagValue>;
get(flagName: string): FlagValue;
getNumber(flagName: string): number;
getBool(flagName: string): boolean;
getString(flagName: string): string;
getFlags(): Flags;
get features(): Flags;
set(flagName: string, value: FlagValue): void;
private evaluateFlag;
setFlags(flags: Flags): void;
reset(): void;
private populateURLFlags;
}
export declare function euclideanDistance(arr1: number[] | Float32Array, arr2: number[] | Float32Array): number;
declare const exp: typeof exp_;
/**
* Computes exponential of the input `tf.Tensor` element-wise. `e ^ x`
*
* ```js
* const x = tf.tensor1d([1, 2, -3]);
*
* x.exp().print(); // or tf.exp(x)
* ```
* @param x The input tensor.
*
* @doc {heading: 'Operations', subheading: 'Basic math'}
*/
declare function exp_<T extends Tensor>(x: T | TensorLike): T;
declare const expandDims: typeof expandDims_;
/**
* Returns a `tf.Tensor` that has expanded rank, by inserting a dimension
* into the tensor's shape.
*
* ```js
* const x = tf.tensor1d([1, 2, 3, 4]);
* const axis = 1;
* x.expandDims(axis).print();
* ```
*
* @param x The input tensor whose dimensions are to be expanded.
* @param axis The dimension index at which to insert shape of `1`. Defaults
* to 0 (the first dimension).
*
* @doc {heading: 'Tensors', subheading: 'Transformations'}
*/
declare function expandDims_<T extends Tensor>(x: Tensor | TensorLike, axis?: number): T;
declare type ExplicitPadding = [
[number, number],
[number, number],
[number, number],
[number, number]
];
export declare function extendWithAge<TSource>(sourceObj: TSource, age: number): WithAge<TSource>;
export declare function extendWithFaceDescriptor<TSource>(sourceObj: TSource, descriptor: Float32Array): WithFaceDescriptor<TSource>;
export declare function extendWithFaceDetection<TSource>(sourceObj: TSource, detection: FaceDetection): WithFaceDetection<TSource>;
export declare function extendWithFaceExpressions<TSource>(sourceObj: TSource, expressions: FaceExpressions): WithFaceExpressions<TSource>;
export declare function extendWithFaceLandmarks<TSource extends WithFaceDetection<{}>, TFaceLandmarks extends FaceLandmarks = FaceLandmarks68>(sourceObj: TSource, unshiftedLandmarks: TFaceLandmarks): WithFaceLandmarks<TSource, TFaceLandmarks>;
export declare function extendWithGender<TSource>(sourceObj: TSource, gender: Gender, genderProbability: number): WithGender<TSource>;
/**
* Extracts the image regions containing the detected faces.
*
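* A usage sketch (assumes a detection model has been loaded and `img` is
* an image element):
*
* ```js
* const detections = await faceapi.detectAllFaces(img);
* const faceCanvases = await faceapi.extractFaces(img, detections);
* ```
*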
* @param input The image that face detection has been performed on.
* @param detections The face detection results or face bounding boxes for that image.
* @returns The Canvases of the corresponding image region for each detected face.
*/
export declare function extractFaces(input: TNetInput, detections: Array<FaceDetection | Rect>): Promise<HTMLCanvasElement[]>;
/**
* Extracts the tensors of the image regions containing the detected faces.
* Useful if you want to compute the face descriptors for the face images.
* Using this method is faster than extracting a canvas for each face and
* converting them to tensors individually.
*
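* A usage sketch (assumes a detection model has been loaded and `img` is
* an image element):
*
* ```js
* const imageTensor = tf.browser.fromPixels(img);
* const detections = await faceapi.detectAllFaces(img);
* const faceTensors = await faceapi.extractFaceTensors(imageTensor, detections);
* faceTensors.forEach((t) => t.dispose()); // free tensor memory when done
* ```
*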
* @param imageTensor The image tensor that face detection has been performed on.
* @param detections The face detection results or face bounding boxes for that image.
* @returns Tensors of the corresponding image region for each detected face.
*/
export declare function extractFaceTensors(imageTensor: tf.Tensor3D | tf.Tensor4D, detections: Array<FaceDetection | Rect>): Promise<tf.Tensor3D[]>;
export declare const FACE_EXPRESSION_LABELS: readonly ["neutral", "happy", "sad", "angry", "fearful", "disgusted", "surprised"];
export declare class FaceDetection extends ObjectDetection implements IFaceDetecion {
constructor(score: number, relativeBox: Rect, imageDims: IDimensions);
forSize(width: number, height: number): FaceDetection;
}
export declare type FaceDetectionFunction = (input: TNetInput) => Promise<FaceDetection[]>;
export declare class FaceDetectionNet extends SsdMobilenetv1 {
}
export declare type FaceDetectionOptions = TinyFaceDetectorOptions | SsdMobilenetv1Options | TinyYolov2Options;
export declare class FaceExpressionNet extends FaceProcessor<FaceFeatureExtractorParams> {
constructor(faceFeatureExtractor?: FaceFeatureExtractor);
forwardInput(input: NetInput | tf.Tensor4D): tf.Tensor2D;
forward(input: TNetInput): Promise<tf.Tensor2D>;
predictExpressions(input: TNetInput): Promise<FaceExpressions | FaceExpressions[]>;
protected getDefaultModelName(): string;
protected getClassifierChannelsIn(): number;
protected getClassifierChannelsOut(): number;
}
export declare class FaceExpressions {
neutral: number;
happy: number;
sad: number;
angry: number;
fearful: number;
disgusted: number;
surprised: number;
constructor(probabilities: number[] | Float32Array);
asSortedArray(): {
expression: "neutral" | "happy" | "sad" | "angry" | "fearful" | "disgusted" | "surprised";
probability: number;
}[];
}
declare class FaceFeatureExtractor extends NeuralNetwork<FaceFeatureExtractorParams> implements IFaceFeatureExtractor<FaceFeatureExtractorParams> {
constructor();
forwardInput(input: NetInput): tf.Tensor4D;
forward(input: TNetInput): Promise<tf.Tensor4D>;
protected getDefaultModelName(): string;
protected extractParamsFromWeightMap(weightMap: tf.NamedTensorMap): {
params: FaceFeatureExtractorParams;
paramMappings: ParamMapping[];
};
protected extractParams(weights: Float32Array): {
params: FaceFeatureExtractorParams;
paramMappings: ParamMapping[];
};
}
declare type FaceFeatureExtractorParams = {
dense0: DenseBlock4Params;
dense1: DenseBlock4Params;
dense2: DenseBlock4Params;
dense3: DenseBlock4Params;
};
export declare class FaceLandmark68Net extends FaceLandmark68NetBase<FaceFeatureExtractorParams> {
constructor(faceFeatureExtractor?: FaceFeatureExtractor);
protected ge