/**
 * @face-detector/react-face-detection
 * Face Detector Web SDK — React components and hooks (bundled ES-module output, index.es.js).
 */
import { jsx, jsxs, Fragment } from "react/jsx-runtime";
import { createContext, useContext, useState, useRef, useCallback, useEffect, useMemo, forwardRef, useImperativeHandle } from "react";
import { FaceDetector, defaultConfig } from "@face-detector/core";
// ProcessState: the lifecycle phases a FaceDetector moves through.
// Built imperatively so each phase reads as one line; key order is preserved.
var i = /* @__PURE__ */ (() => {
  const states = {};
  states.INITIALIZING = "initializing";
  states.READY = "ready";
  states.RUNNING = "running";
  states.MEASURING = "measuring";
  states.COMPLETED = "completed";
  states.FAILED = "failed";
  return states;
})();
// Shared context carrying the detector instance, latest report, and element registrars.
const FaceDetectorContext = createContext(null);
/**
 * Returns the nearest FaceDetector context value.
 * @throws {Error} when called outside a provider tree.
 */
const useFaceDetectorContext = () => {
  const context = useContext(FaceDetectorContext);
  if (context) {
    return context;
  }
  throw new Error("useFaceDetectorContext must be used within a FaceDetectorVideo component");
};
// Per-canvas FaceDetector cache. WeakMap keys on the extracting canvas element,
// so an instance is released once its canvas is garbage-collected.
const detector = /* @__PURE__ */ new WeakMap();
/**
 * Returns the memoized FaceDetector for the config's extracting canvas,
 * constructing and caching one on first use.
 */
function createFaceDetector(config) {
  const cacheKey = config.measurementConfig.extractingCanvas;
  const cached = detector.get(cacheKey);
  if (cached !== undefined) {
    return cached;
  }
  const instance = new FaceDetector(config);
  detector.set(cacheKey, instance);
  return instance;
}
/**
 * Builds a complete core FaceDetector config by overlaying the caller's
 * partial `config` on the library defaults for the given video/canvas pair.
 * Only nullish overrides fall back to defaults (`??`), so falsy values like
 * 0 or false are honored.
 */
function createConfig(videoElement, extractingCanvasElement, config) {
  const baseConfig = defaultConfig(videoElement, extractingCanvasElement);
  const dataOverrides = config?.dataProcessingManagerConfig;
  const faceOverrides = config?.faceDetectionManagerConfig;
  const camOverrides = config?.webCamManagerConfig;
  const measureOverrides = config?.measurementConfig;
  return {
    dataProcessingManagerConfig: {
      targetDataLength: dataOverrides?.targetDataLength ?? baseConfig.dataProcessingManagerConfig.targetDataLength
    },
    faceDetectionManagerConfig: {
      minDetectionConfidence: faceOverrides?.minDetectionConfidence ?? baseConfig.faceDetectionManagerConfig.minDetectionConfidence,
      delegate: faceOverrides?.delegate ?? baseConfig.faceDetectionManagerConfig.delegate
    },
    webCamManagerConfig: {
      width: camOverrides?.width ?? baseConfig.webCamManagerConfig.width,
      height: camOverrides?.height ?? baseConfig.webCamManagerConfig.height
    },
    measurementConfig: {
      countdown: measureOverrides?.countdown ?? baseConfig.measurementConfig.countdown,
      processingFps: measureOverrides?.processingFps ?? baseConfig.measurementConfig.processingFps,
      validationFps: measureOverrides?.validationFps ?? baseConfig.measurementConfig.validationFps,
      retryCount: measureOverrides?.retryCount ?? baseConfig.measurementConfig.retryCount,
      willReadFrequently: measureOverrides?.willReadFrequently ?? baseConfig.measurementConfig.willReadFrequently,
      // The caller-supplied elements always win: they are the ones the detector reads from.
      extractingCanvas: extractingCanvasElement,
      videoElement
    },
    debug: config?.debug ?? baseConfig.debug
  };
}
/**
 * Context provider wiring a <video>/<canvas> pair (registered by
 * FaceDetectorVideo) to a FaceDetector instance, and exposing the instance,
 * the latest report, and the registrars through FaceDetectorContext.
 *
 * Props:
 *   config        - optional partial core config, merged over defaults.
 *   onVideoReady / onCanvasReady - fired when each element registers.
 *   onStreamReady - fired with the video's MediaStream once the detector
 *                   reaches the READY state.
 */
const FaceDetectorProvider = ({
  config,
  children,
  onVideoReady,
  onCanvasReady,
  onStreamReady
}) => {
  // Shared detector instance; stays null until both elements are registered.
  const [detector2, setDetector] = useState(null);
  const videoElementRef = useRef(null);
  const extractingCanvasElementRef = useRef(null);
  // Defaults to true; overwritten with the resolved config's debug flag during init.
  const [debug, setDebug] = useState(true);
  const [report, setReport] = useState(null);
  // Called by FaceDetectorVideo when its <video> mounts. Initialization only
  // proceeds once BOTH elements are present (checked here and in registerCanvas).
  // NOTE(review): initializeFaceDetector is deliberately absent from the deps;
  // the memoized callback may therefore invoke a stale closure of it — confirm
  // this is safe (createFaceDetector's per-canvas cache appears to mitigate it).
  const registerVideo = useCallback((videoElement) => {
    videoElementRef.current = videoElement;
    onVideoReady == null ? void 0 : onVideoReady(videoElement);
    if (extractingCanvasElementRef.current) {
      initializeFaceDetector();
    }
  }, [onVideoReady]);
  // Mirror of registerVideo for the hidden extracting canvas.
  const registerCanvas = useCallback((extractingCanvasElement) => {
    extractingCanvasElementRef.current = extractingCanvasElement;
    onCanvasReady == null ? void 0 : onCanvasReady(extractingCanvasElement);
    if (videoElementRef.current) {
      initializeFaceDetector();
    }
  }, [onCanvasReady]);
  // Builds the config, creates (or reuses) the detector, and subscribes to its
  // state so onStreamReady can fire when the camera stream becomes available.
  const initializeFaceDetector = useCallback(() => {
    // Idempotent: bail if either element is missing or a detector already exists.
    if (!videoElementRef.current || !extractingCanvasElementRef.current || detector2) {
      return;
    }
    try {
      const coreConfig = createConfig(videoElementRef.current, extractingCanvasElementRef.current, config);
      setDebug(coreConfig.debug);
      const newDetector = createFaceDetector(coreConfig);
      setDetector(newDetector);
      const unsubscribeState = newDetector.subscribeState((newState) => {
        if (newState === i.READY && videoElementRef.current) {
          const srcObject = videoElementRef.current.srcObject;
          // MediaStream may be undefined in non-browser environments, hence the typeof guard.
          if (srcObject && typeof MediaStream !== "undefined" && srcObject instanceof MediaStream) {
            onStreamReady == null ? void 0 : onStreamReady(srcObject);
          }
        }
      });
      // NOTE(review): this cleanup closure is discarded by registerVideo /
      // registerCanvas (they ignore the return value), so unsubscribeState is
      // never invoked via this path — verify the subscription is released
      // elsewhere (e.g. inside detector.stop()), otherwise it leaks.
      return () => {
        unsubscribeState();
        newDetector.stop();
        setDetector(null);
      };
    } catch (error) {
      // Initialization failures are logged, not rethrown; the provider simply
      // stays without a detector.
      console.error("react-hooks: FaceDetector initialization failed:", error);
    }
  }, [config, detector2]);
  // Stop the detector when it is replaced or the provider unmounts.
  useEffect(() => {
    return () => {
      if (detector2) {
        detector2.stop();
        setDetector(null);
      }
    };
  }, [detector2]);
  // Memoized so consumers only re-render when one of these values changes.
  const contextValue = useMemo(() => ({
    detector: detector2,
    report,
    setReport,
    registerVideo,
    registerCanvas,
    debug
  }), [detector2, report, setReport, registerVideo, registerCanvas, debug]);
  return /* @__PURE__ */ jsx(FaceDetectorContext.Provider, { value: contextValue, children });
};
/**
 * Renders the camera <video> plus a hidden extracting <canvas>, registering
 * both with the surrounding FaceDetectorProvider after mount. The forwarded
 * ref exposes the underlying video element.
 */
const FaceDetectorVideo = forwardRef(({
  className,
  style,
  ...videoProps
}, ref) => {
  const videoRef = useRef(null);
  const extractingCanvasRef = useRef(null);
  const { registerVideo, registerCanvas } = useFaceDetectorContext();
  // Expose the raw video element to parent refs.
  useImperativeHandle(ref, () => videoRef.current, []);
  useEffect(() => {
    const videoEl = videoRef.current;
    const canvasEl = extractingCanvasRef.current;
    if (videoEl) {
      registerVideo(videoEl);
    }
    if (canvasEl) {
      registerCanvas(canvasEl);
    }
  }, [registerVideo, registerCanvas]);
  // autoPlay/muted/playsInline come first so callers may override them via videoProps.
  const videoNode = /* @__PURE__ */ jsx("video", {
    ref: videoRef,
    className: `face-detector-video ${className || ""}`,
    style,
    autoPlay: true,
    muted: true,
    playsInline: true,
    ...videoProps
  });
  // Off-screen canvas used for RGB extraction; never shown to the user.
  const canvasNode = /* @__PURE__ */ jsx("canvas", {
    ref: extractingCanvasRef,
    className: "face-detector-extracting-canvas",
    style: { display: "none" }
  });
  return /* @__PURE__ */ jsxs(Fragment, { children: [videoNode, canvasNode] });
});
FaceDetectorVideo.displayName = "FaceDetectorVideo";
/**
 * Main control hook for face detection. Returns the latest report, an optional
 * debug-only run duration (ms), and the detector's control methods.
 * `start` throws if no detector has been initialized yet, and rethrows any
 * failure from the underlying run after clearing the stored report.
 */
const useFaceDetector = () => {
  const { detector: detector2, report, setReport, debug } = useFaceDetectorContext();
  const [duration, setDuration] = useState(null);
  // Runs one full detection/measurement cycle and stores the resulting report.
  const start = useCallback(async () => {
    if (!detector2) {
      throw new Error("FaceDetector not initialized");
    }
    // Timing starts only once we know a run will actually happen.
    const start2 = performance.now();
    try {
      const result = await detector2.run();
      setReport(result);
      const end = performance.now();
      if (debug) {
        setDuration(end - start2);
      }
      return result;
    } catch (err) {
      const error = err;
      console.error("Face detection failed:", error);
      setReport(null);
      throw error;
    }
    // FIX: `debug` added to the dependency list — the original omitted it, so a
    // memoized `start` kept the stale `debug` value captured at first render.
  }, [detector2, setReport, debug]);
  const stop = useCallback(() => {
    if (detector2) {
      detector2.stop();
    }
  }, [detector2]);
  const terminate = useCallback(() => {
    if (detector2) {
      detector2.terminate();
    }
  }, [detector2]);
  const reset = useCallback(() => {
    if (detector2) {
      detector2.reset();
    }
  }, [detector2]);
  const enableRGBExtraction = useCallback((shouldExcuteRGBExtraction) => {
    if (detector2) {
      return detector2.enableRGBExtraction(shouldExcuteRGBExtraction);
    }
  }, [detector2]);
  return {
    // State
    report,
    duration,
    // Control methods
    start,
    stop,
    terminate,
    reset,
    enableRGBExtraction,
    // Underlying FaceDetector instance
    detector: detector2
  };
};
/**
 * Tracks the detector's lifecycle state, falling back to INITIALIZING while
 * no detector instance exists yet.
 */
const useFaceDetectorState = () => {
  const { detector: detector2 } = useFaceDetectorContext();
  const [state, setState] = useState(i.INITIALIZING);
  useEffect(() => {
    if (!detector2) {
      setState(i.INITIALIZING);
      return;
    }
    // Seed with the current state, then follow every subsequent change.
    setState(detector2.getState());
    return detector2.subscribeState((next) => setState(next));
  }, [detector2]);
  return state;
};
/** Tracks the measurement countdown published by the detector (0 when idle). */
const useCountDown = () => {
  const { detector: detector2 } = useFaceDetectorContext();
  const [countdown, setCountdown] = useState(0);
  useEffect(() => {
    if (!detector2) {
      return;
    }
    // Subscribe for the lifetime of this detector; unsubscribe on change/unmount.
    return detector2.subscribeCountdown((value) => setCountdown(value));
  }, [detector2]);
  return countdown;
};
/** Tracks measurement progress published by the detector (starts at 0). */
const useProgress = () => {
  const { detector: detector2 } = useFaceDetectorContext();
  const [progress, setProgress] = useState(0);
  useEffect(() => {
    if (!detector2) {
      return;
    }
    // Subscribe for the lifetime of this detector; unsubscribe on change/unmount.
    return detector2.subscribeProgress((value) => setProgress(value));
  }, [detector2]);
  return progress;
};
/** Tracks the detected face position; null until the detector reports one. */
const useFacePosition = () => {
  const { detector: detector2 } = useFaceDetectorContext();
  const [facePosition, setFacePosition] = useState(null);
  useEffect(() => {
    if (!detector2) {
      return;
    }
    // Subscribe for the lifetime of this detector; unsubscribe on change/unmount.
    return detector2.subscribeFacePosition((value) => setFacePosition(value));
  }, [detector2]);
  return facePosition;
};
// Public package surface: the provider + video component, the ProcessState
// enum (bundled under the minified name `i`), and the subscription hooks.
export {
  FaceDetectorContext,
  FaceDetectorProvider,
  FaceDetectorVideo,
  i as ProcessState,
  useCountDown,
  useFaceDetector,
  useFaceDetectorContext,
  useFaceDetectorState,
  useFacePosition,
  useProgress
};
//# sourceMappingURL=index.es.js.map