face-api.js
JavaScript API for face detection and face recognition in the browser with tensorflow.js
import { __awaiter, __generator } from "tslib";
import { FaceDetection } from '../classes/FaceDetection';
import { env } from '../env';
import { createCanvas } from './createCanvas';
import { getContext2dOrThrow } from './getContext2dOrThrow';
import { imageTensorToCanvas } from './imageTensorToCanvas';
import { toNetInput } from './toNetInput';
/**
* Extracts the image regions containing the detected faces.
*
* @param input The image that face detection has been performed on.
* @param detections The face detection results or face bounding boxes for that image.
 * @returns The Canvases containing the corresponding image region for each detected face.
*/
export function extractFaces(input, detections) {
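// Downlevel-compiled (ES5) async function: tslib's __awaiter/__generator emulate async/await as a switch-based state machine.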
return __awaiter(this, void 0, void 0, function () {
var Canvas, canvas, netInput, tensorOrCanvas, _a, ctx, boxes;
return __generator(this, function (_b) {
switch (_b.label) {
case 0:
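// Case 0: resolve the environment-specific Canvas implementation (browser canvas or node-canvas).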
Canvas = env.getEnv().Canvas;
canvas = input;
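// If the input is already a Canvas, skip straight to region extraction (case 5); otherwise normalize it via toNetInput.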
if (!!(input instanceof Canvas)) return [3 /*break*/, 5];
return [4 /*yield*/, toNetInput(input)];
case 1:
netInput = _b.sent();
if (netInput.batchSize > 1) {
throw new Error('extractFaces - batchSize > 1 not supported');
}
tensorOrCanvas = netInput.getInput(0);
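// Cases 2-4: a tensor input must first be rendered to a canvas so its pixel data can be read back.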
if (!(tensorOrCanvas instanceof Canvas)) return [3 /*break*/, 2];
_a = tensorOrCanvas;
return [3 /*break*/, 4];
case 2: return [4 /*yield*/, imageTensorToCanvas(tensorOrCanvas)];
case 3:
_a = _b.sent();
_b.label = 4;
case 4:
canvas = _a;
_b.label = 5;
case 5:
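// Case 5: rescale FaceDetection results to the canvas dimensions and clamp the boxes to the image borders.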
ctx = getContext2dOrThrow(canvas);
boxes = detections.map(function (det) { return det instanceof FaceDetection
? det.forSize(canvas.width, canvas.height).box.floor()
: det; })
.map(function (box) { return box.clipAtImageBorders(canvas.width, canvas.height); });
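// Copy each face region's pixels into its own, newly created canvas.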
return [2 /*return*/, boxes.map(function (_a) {
var x = _a.x, y = _a.y, width = _a.width, height = _a.height;
var faceImg = createCanvas({ width: width, height: height });
getContext2dOrThrow(faceImg)
.putImageData(ctx.getImageData(x, y, width, height), 0, 0);
return faceImg;
})];
}
});
});
}
//# sourceMappingURL=extractFaces.js.map
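The compiled __awaiter/__generator state machine above is hard to follow; it corresponds to roughly the following async/await form (a sketch of the equivalent logic, not the shipped source):

export async function extractFaces(input, detections) {
  const { Canvas } = env.getEnv();
  let canvas = input;
  if (!(input instanceof Canvas)) {
    // Normalize media elements / tensors into a NetInput first.
    const netInput = await toNetInput(input);
    if (netInput.batchSize > 1) {
      throw new Error('extractFaces - batchSize > 1 not supported');
    }
    const tensorOrCanvas = netInput.getInput(0);
    canvas = tensorOrCanvas instanceof Canvas
      ? tensorOrCanvas
      : await imageTensorToCanvas(tensorOrCanvas);
  }
  const ctx = getContext2dOrThrow(canvas);
  const boxes = detections
    .map(det => det instanceof FaceDetection
      ? det.forSize(canvas.width, canvas.height).box.floor()
      : det)
    .map(box => box.clipAtImageBorders(canvas.width, canvas.height));
  return boxes.map(({ x, y, width, height }) => {
    const faceImg = createCanvas({ width, height });
    getContext2dOrThrow(faceImg)
      .putImageData(ctx.getImageData(x, y, width, height), 0, 0);
    return faceImg;
  });
}

Typical browser usage (a sketch; the model path '/models' and element id 'inputImage' are assumptions, and a detection model must be loaded before detectAllFaces is called):

import * as faceapi from 'face-api.js';

async function showFaces() {
  // Load the default SSD MobileNet v1 detector (model path is an assumption).
  await faceapi.nets.ssdMobilenetv1.loadFromUri('/models');
  const img = document.getElementById('inputImage'); // hypothetical <img> element
  const detections = await faceapi.detectAllFaces(img);
  // extractFaces returns one canvas per detected face region.
  const faceCanvases = await faceapi.extractFaces(img, detections);
  faceCanvases.forEach(canvas => document.body.appendChild(canvas));
}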