react-native-vision-camera-detector

Frame Processor Plugin to detect faces and recognize text using MLKit Vision for React Native Vision Camera!

191 lines (173 loc) 6.71 kB
"use strict"; Object.defineProperty(exports, "__esModule", { value: true }); exports.Camera = void 0; exports.useTextRecognition = useTextRecognition; exports.useTranslate = useTranslate; var _react = _interopRequireWildcard(require("react")); var _reactNativeVisionCamera = require("react-native-vision-camera"); var _reactNativeWorkletsCore = require("react-native-worklets-core"); var _FaceDetector = require("./FaceDetector"); var _scanText = require("./text_detector/scanText"); var _translateText = require("./text_detector/translateText"); function _getRequireWildcardCache(e) { if ("function" != typeof WeakMap) return null; var r = new WeakMap(), t = new WeakMap(); return (_getRequireWildcardCache = function (e) { return e ? t : r; })(e); } function _interopRequireWildcard(e, r) { if (!r && e && e.__esModule) return e; if (null === e || "object" != typeof e && "function" != typeof e) return { default: e }; var t = _getRequireWildcardCache(r); if (t && t.has(e)) return t.get(e); var n = { __proto__: null }, a = Object.defineProperty && Object.getOwnPropertyDescriptor; for (var u in e) if ("default" !== u && {}.hasOwnProperty.call(e, u)) { var i = a ? Object.getOwnPropertyDescriptor(e, u) : null; i && (i.get || i.set) ? Object.defineProperty(n, u, i) : n[u] = e[u]; } return n.default = e, t && t.set(e, n), n; } function _extends() { return _extends = Object.assign ? Object.assign.bind() : function (n) { for (var e = 1; e < arguments.length; e++) { var t = arguments[e]; for (var r in t) ({}).hasOwnProperty.call(t, r) && (n[r] = t[r]); } return n; }, _extends.apply(null, arguments); } // types /** * Create a Worklet function that persists between re-renders. * The returned function can be called from both a Worklet context and the JS context, but will execute on a Worklet context. * * @param {function} func The Worklet. Must be marked with the `'worklet'` directive. * @param {DependencyList} dependencyList The React dependencies of this Worklet. * @returns {UseWorkletType} A memoized Worklet */ function useWorklet(func, dependencyList) { const worklet = _react.default.useMemo(() => { const context = _reactNativeWorkletsCore.Worklets.defaultContext; return context.createRunAsync(func); }, dependencyList); return worklet; } /** * Create a Worklet function that runs the giver function on JS context. * The returned function can be called from a Worklet to hop back to the JS thread. * * @param {function} func The Worklet. Must be marked with the `'worklet'` directive. * @param {DependencyList} dependencyList The React dependencies of this Worklet. * @returns {UseRunInJSType} a memoized Worklet */ function useRunInJS(func, dependencyList) { return _react.default.useMemo(() => _reactNativeWorkletsCore.Worklets.createRunOnJS(func), dependencyList); } /** * Vision camera wrapper * * @param {ComponentType} props Camera + face detection props * @returns */ const Camera = exports.Camera = /*#__PURE__*/_react.default.forwardRef(({ faceDetectionOptions, faceDetectionCallback, skiaActions, ...props }, ref) => { const { detectFaces } = (0, _FaceDetector.useFaceDetector)(faceDetectionOptions); /** * Is there an async task already running? */ const isAsyncContextBusy = (0, _reactNativeWorkletsCore.useSharedValue)(false); const faces = (0, _reactNativeWorkletsCore.useSharedValue)('[]'); /** * Throws logs/errors back on js thread */ const logOnJs = _reactNativeWorkletsCore.Worklets.createRunOnJS((log, error) => { if (error) { console.error(log, error.message ?? 
JSON.stringify(error)); } else { console.log(log); } }); /** * Runs on detection callback on js thread */ const runOnJs = useRunInJS(faceDetectionCallback, [faceDetectionCallback]); /** * Async context that will handle face detection */ const runOnAsyncContext = useWorklet(frame => { 'worklet'; try { faces.value = JSON.stringify(detectFaces(frame)); // increment frame count so we can use frame on // js side without frame processor getting stuck frame.incrementRefCount(); runOnJs(JSON.parse(faces.value), frame).finally(() => { 'worklet'; // finally decrement frame count so it can be dropped frame.decrementRefCount(); }); } catch (error) { logOnJs('Execution error:', error); } finally { frame.decrementRefCount(); isAsyncContextBusy.value = false; } }, [detectFaces, runOnJs]); /** * Detect faces on frame on an async context without blocking camera preview * * @param {Frame} frame Current frame */ function runAsync(frame) { 'worklet'; if (isAsyncContextBusy.value) return; // set async context as busy isAsyncContextBusy.value = true; // cast to internal frame and increment ref count const internal = frame; internal.incrementRefCount(); // detect faces in async context runOnAsyncContext(internal); } /** * Skia frame processor */ const skiaFrameProcessor = (0, _reactNativeVisionCamera.useSkiaFrameProcessor)(frame => { 'worklet'; frame.render(); skiaActions(JSON.parse(faces.value), frame); runAsync(frame); }, [runOnAsyncContext, skiaActions]); /** * Default frame processor */ const cameraFrameProcessor = (0, _reactNativeVisionCamera.useFrameProcessor)(frame => { 'worklet'; runAsync(frame); }, [runOnAsyncContext]); /** * Camera frame processor */ const frameProcessor = (() => { const { autoMode } = faceDetectionOptions ?? {}; if (!autoMode && !!skiaActions) return skiaFrameProcessor; return cameraFrameProcessor; })(); // // use bellow when vision-camera's // context creation issue is solved // // /** // * Runs on detection callback on js thread // */ // const runOnJs = useRunOnJS( faceDetectionCallback, [ // faceDetectionCallback // ] ) // const cameraFrameProcessor = useFrameProcessor( ( frame ) => { // 'worklet' // runAsync( frame, () => { // 'worklet' // runOnJs( // detectFaces( frame ), // frame // ) // } ) // }, [ runOnJs ] ) return /*#__PURE__*/_react.default.createElement(_reactNativeVisionCamera.Camera, _extends({}, props, { ref: ref, frameProcessor: frameProcessor, pixelFormat: "yuv" })); }); function useTextRecognition(options) { return (0, _react.useMemo)(() => (0, _scanText.createTextRecognitionPlugin)(options), [options]); } function useTranslate(options) { return (0, _react.useMemo)(() => (0, _translateText.createTranslatorPlugin)(options), [options]); } //# sourceMappingURL=Camera.js.map
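Below is a minimal usage sketch, not part of the published file, showing how the exported Camera wrapper might be wired up. The faceDetectionOptions, faceDetectionCallback, and skiaActions prop names are taken from the compiled source above; useCameraDevice, device, and isActive come from react-native-vision-camera. The import path, the performanceMode option, and the shape of the detection result passed to the callback are assumptions and may differ from the package's actual API.

import React from 'react'
import { useCameraDevice } from 'react-native-vision-camera'
// assumed entry point; the package may re-export Camera from a different path
import { Camera } from 'react-native-vision-camera-detector'

export function FaceDetectionScreen() {
  // pick the front camera; undefined until a device is available
  const device = useCameraDevice('front')
  if (device == null) return null

  return (
    <Camera
      device={device}
      isActive={true}
      // forwarded to useFaceDetector; 'performanceMode' is a typical MLKit
      // face-detector option and only an assumption here
      faceDetectionOptions={{ performanceMode: 'fast' }}
      // invoked on the JS thread with the parsed detection result and the frame;
      // the wrapper manages the frame's ref count (see runOnAsyncContext above)
      faceDetectionCallback={(faces, frame) => {
        console.log('faces:', faces, 'frame:', frame.width, 'x', frame.height)
      }}
    />
  )
}

Note that the wrapper always renders the underlying vision-camera Camera with pixelFormat "yuv", and it only switches to the Skia frame processor when skiaActions is provided and autoMode is not set in faceDetectionOptions; otherwise the default frame processor is used.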