/*
 * @snap/camera-kit
 * Version:
 * Camera Kit Web
 * 105 lines • 5.52 kB
 * JavaScript
 */
import { __awaiter } from "tslib";
import { CameraKitSource } from "./CameraKitSource";
/**
 * Reads the pixels of a LensCore output canvas back as NV12 (YUV 4:2:0) bytes,
 * wrapped in an ImageData used purely as a carrier for the raw buffer.
 *
 * @param canvas   Canvas whose pixels LensCore converts to YUV.
 * @param lensCore LensCore instance providing `imageToYuvBuffer`.
 * @returns ImageData whose `.data` holds the NV12 bytes for `canvas`.
 */
const getYUVImageData = async (canvas, lensCore) => {
    const { width, height } = canvas;
    // NV12 layout: full-resolution Y plane plus interleaved quarter-resolution
    // U/V planes — width * height * 1.5 bytes total.
    const yuvBytes = new ArrayBuffer((width * height * 3) / 2);
    await lensCore.imageToYuvBuffer({ image: canvas, width, height, outputBuffer: yuvBytes });
    // NOTE(review): a spec-compliant ImageData constructor expects
    // width * height * 4 bytes; this NV12 buffer is width * height * 1.5 —
    // confirm the consuming environment accepts this carrier usage.
    return new ImageData(new Uint8ClampedArray(yuvBytes), width, height);
};
/**
 * Reads the RGBA pixels of a LensCore output canvas via a scratch 2D context.
 *
 * @param output        Canvas (with `width`/`height`) to read pixels from.
 * @param imageReader2D Scratch CanvasRenderingContext2D, or null when the
 *                      2D context could not be created.
 * @returns ImageData covering the full output canvas.
 * @throws Error when `imageReader2D` is null. (The previous fallback,
 *   `new ImageData(0, 0)`, itself throws per spec — width and height must be
 *   positive — so surface a descriptive error instead.)
 */
const getRGBImageData = (output, imageReader2D) => {
    if (imageReader2D === null) {
        throw new Error("Cannot read RGB frame output: 2D canvas context is unavailable.");
    }
    imageReader2D.drawImage(output, 0, 0);
    return imageReader2D.getImageData(0, 0, output.width, output.height);
};
/**
 * Converts an ImageData payload into an ImageBitmap LensCore can consume.
 *
 * For "nv12"/"yuv" input, the raw bytes in `imageData.data.buffer` are wrapped
 * in a WebCodecs VideoFrame (interpreted as NV12) before conversion; for "rgb"
 * the ImageData is handed to `createImageBitmap` directly.
 *
 * @param imageData Pixel payload; for YUV formats `data.buffer` holds raw NV12 bytes.
 * @param format    "nv12" | "yuv" | "rgb".
 * @returns Promise of an ImageBitmap. Rejects for YUV input when the browser
 *   lacks WebCodecs `VideoFrame` support. Unknown formats resolve to
 *   undefined, matching the original switch without a default case.
 */
const getImageBitmap = async (imageData, format) => {
    switch (format) {
        case "nv12":
        case "yuv": {
            if (!window.VideoFrame)
                return Promise.reject(new Error(`Cannot process frame. ImageData in ${format} is not supported by this browser.`));
            const frame = new VideoFrame(imageData.data.buffer, {
                format: "NV12",
                codedWidth: imageData.width,
                codedHeight: imageData.height,
                timestamp: 0,
            });
            try {
                // createImageBitmap copies the frame's pixels, so the VideoFrame
                // can — and per WebCodecs must — be closed afterwards to release
                // its media resource. The original never closed it (a leak).
                return await createImageBitmap(frame);
            }
            finally {
                frame.close();
            }
        }
        case "rgb":
            return createImageBitmap(imageData);
    }
};
/**
 * Creates a CameraKitSource whose frames are supplied by a caller-provided
 * `sourceFunction` instead of a media element.
 *
 * On attach, a requestAnimationFrame loop repeatedly invokes `sourceFunction`,
 * handing it a callback that submits one frame (ImageData plus optional
 * "rgb" | "yuv" | "nv12" format) to LensCore and resolves with the processed
 * { live, capture } output pixels. The loop stops when the source detaches.
 *
 * @param sourceFunction Called once per animation frame with a submit-frame
 *   callback; errors it does not handle are wrapped and passed to reportError.
 * @param options Forwarded to the CameraKitSource constructor.
 * @returns A CameraKitSource configured for manual frame processing.
 */
export const createFunctionSource = (sourceFunction, options = {}) => {
    // Most recent render size; pushed to the source only when the incoming
    // frame dimensions change.
    let width = 0;
    let height = 0;
    // Flipped to false in onDetach to end the requestAnimationFrame loop.
    let shouldProcessFrame = true;
    // Scratch canvas + 2D context used to read RGB pixels back out of
    // LensCore's output canvases.
    const imageReaderCanvas = document.createElement("canvas");
    const imageReader2D = imageReaderCanvas.getContext("2d");
    const subscriber = {
        onAttach: (source, lensCore, reportError) => {
            // LensCore renders into two canvases: the live preview and the
            // capture target.
            const outputs = lensCore.getOutputCanvases();
            const output = {
                live: outputs[lensCore.CanvasType.Preview.value],
                capture: outputs[lensCore.CanvasType.Capture.value],
            };
            // Processes one frame per requestAnimationFrame tick; re-schedules
            // itself via frameOutput.finally(...) below.
            const processFrame = (source, lensCore, reportError) => requestAnimationFrame(() => __awaiter(void 0, void 0, void 0, function* () {
                var _a;
                if (!shouldProcessFrame)
                    return;
                try {
                    // sourceFunction decides when (and with what pixels) to
                    // submit a frame via this callback.
                    yield sourceFunction(({ format, imageData, timestampMillis }) => {
                        // NOTE(review): the executor passed to `new Promise` is itself
                        // async (__awaiter-wrapped); if it throws before resolve/reject
                        // is reached, frameOutput never settles and the inner promise's
                        // rejection goes unhandled — confirm this is acceptable.
                        const frameOutput = new Promise((resolve, reject) => __awaiter(void 0, void 0, void 0, function* () {
                            // Format defaults to "rgb" when the caller omits it.
                            const inputFrame = yield getImageBitmap(imageData, format !== null && format !== void 0 ? format : "rgb");
                            // Resize render target and scratch canvas only when
                            // the input dimensions change.
                            if (inputFrame.width !== width || inputFrame.height !== height) {
                                width = imageReaderCanvas.width = inputFrame.width;
                                height = imageReaderCanvas.height = inputFrame.height;
                                source.setRenderSize(width, height);
                            }
                            lensCore
                                .processFrame({ inputFrame, timestampMillis })
                                .then(() => __awaiter(void 0, void 0, void 0, function* () {
                                // The bitmap has been consumed by LensCore; release it.
                                inputFrame.close();
                                // Read the processed pixels back in the same format
                                // the caller submitted them.
                                switch (format !== null && format !== void 0 ? format : "rgb") {
                                    case "nv12":
                                    case "yuv":
                                        const [live, capture] = yield Promise.all([
                                            getYUVImageData(output.live, lensCore),
                                            getYUVImageData(output.capture, lensCore),
                                        ]).catch((error) => {
                                            // Funnel read-back failures into frameOutput's rejection.
                                            reject(error);
                                            return [undefined, undefined];
                                        });
                                        if (!live || !capture)
                                            return;
                                        return resolve({ live, capture });
                                    case "rgb":
                                        return resolve({
                                            live: getRGBImageData(output.live, imageReader2D),
                                            capture: getRGBImageData(output.capture, imageReader2D),
                                        });
                                }
                            }))
                                .catch((error) => {
                                // Ensure the ImageBitmap is released even on failure.
                                inputFrame.close();
                                reject(error);
                            });
                        }));
                        // Whether this frame succeeded or failed, schedule the next one.
                        frameOutput.finally(() => processFrame(source, lensCore, reportError));
                        return frameOutput;
                    });
                }
                catch (error) {
                    // Errors the sourceFunction did not handle are surfaced to the host.
                    reportError(new Error("Failure to process frame, which was not handled by the provided " +
                        `MediaSourceFunction ${(_a = sourceFunction.name) !== null && _a !== void 0 ? _a : "anonymous"}.`, { cause: error }));
                }
            }));
            // Kick off the frame loop.
            processFrame(source, lensCore, reportError);
        },
        onDetach: () => {
            // Stop scheduling new frames; any in-flight frame simply completes.
            shouldProcessFrame = false;
        },
    };
    return new CameraKitSource({ useManualFrameProcessing: true }, subscriber, options);
};
//# sourceMappingURL=FunctionSource.js.map