@sign-speak/react-sdk
Unlock Sign Language Recognition, Avatar, and Speech Recognition.
"use strict";
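// Downlevel helper emitted by the TypeScript compiler: falls back to a manual
// own-property copy when Object.assign is unavailable; used below to merge props objects.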
var __assign = (this && this.__assign) || function () {
__assign = Object.assign || function(t) {
for (var s, i = 1, n = arguments.length; i < n; i++) {
s = arguments[i];
for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p))
t[p] = s[p];
}
return t;
};
return __assign.apply(this, arguments);
};
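// Downlevel helper: runs an async function compiled to a generator, chaining each
// awaited value through Promises so the code works on pre-ES2017 targets.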
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
return new (P || (P = Promise))(function (resolve, reject) {
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
step((generator = generator.apply(thisArg, _arguments || [])).next());
});
};
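// Downlevel helper: a state machine emulating generator semantics (next/throw/return
// plus try/finally bookkeeping) on targets without native generator support.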
var __generator = (this && this.__generator) || function (thisArg, body) {
var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;
return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g;
function verb(n) { return function (v) { return step([n, v]); }; }
function step(op) {
if (f) throw new TypeError("Generator is already executing.");
while (g && (g = 0, op[0] && (_ = 0)), _) try {
if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;
if (y = 0, t) op = [op[0] & 2, t.value];
switch (op[0]) {
case 0: case 1: t = op; break;
case 4: _.label++; return { value: op[1], done: false };
case 5: _.label++; y = op[1]; op = [0]; continue;
case 7: op = _.ops.pop(); _.trys.pop(); continue;
default:
if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }
if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }
if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }
if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }
if (t[2]) _.ops.pop();
_.trys.pop(); continue;
}
op = body.call(thisArg, _);
} catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }
if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };
}
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.SignRecognition = void 0;
var jsx_runtime_1 = require("react/jsx-runtime");
var react_1 = require("react");
var react_sdk_1 = require("@sign-speak/react-sdk");
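// SignRecognition: a drop-in component that renders a live camera preview, drives
// recognition through the SDK's useSignLanguageRecognition hook, and displays the
// recognized text for predictions above the confidence threshold.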
var SignRecognition = function (_a) {
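// Destructure props; class names default to "" and confidenceThreshold to
// Math.log(0.5) (see the log-probability note further down).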
var model = _a.model,
    onResult = _a.onResult,
    _b = _a.interpretationClassName, interpretationClassName = _b === void 0 ? "" : _b,
    _c = _a.containerClassName, containerClassName = _c === void 0 ? "" : _c,
    _d = _a.loadingClassName, loadingClassName = _d === void 0 ? "" : _d,
    _e = _a.buttonClassName, buttonClassName = _e === void 0 ? "" : _e,
    _f = _a.cameraClassName, cameraClassName = _f === void 0 ? "" : _f,
    _g = _a.confidenceThreshold, confidenceThreshold = _g === void 0 ? Math.log(0.5) : _g;
// Create a ref for the video preview element.
var previewRef = (0, react_1.useRef)(null);
// Initialize the camera preview on mount.
(0, react_1.useEffect)(function () {
function initPreview() {
return __awaiter(this, void 0, void 0, function () {
var stream, err_1;
return __generator(this, function (_a) {
switch (_a.label) {
case 0:
_a.trys.push([0, 2, , 3]);
return [4 /*yield*/, navigator.mediaDevices.getUserMedia({
video: true,
audio: false,
})];
case 1:
stream = _a.sent();
if (previewRef.current) {
previewRef.current.srcObject = stream;
}
return [3 /*break*/, 3];
case 2:
err_1 = _a.sent();
console.error("Error accessing camera for preview:", err_1);
return [3 /*break*/, 3];
case 3: return [2 /*return*/];
}
});
});
}
initPreview();
return function () {
if (previewRef.current && previewRef.current.srcObject) {
previewRef.current.srcObject
.getTracks()
.forEach(function (track) { return track.stop(); });
}
};
}, []);
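// For reference, the compiled state machine above corresponds to this async/await
// source shape (a sketch; the original TypeScript source is not shown here):
//
//   async function initPreview() {
//     try {
//       const stream = await navigator.mediaDevices.getUserMedia({ video: true, audio: false });
//       if (previewRef.current) previewRef.current.srcObject = stream;
//     } catch (err) {
//       console.error("Error accessing camera for preview:", err);
//     }
//   }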
// Use the custom hook to handle recognition logic.
var _h = (0, react_sdk_1.useSignLanguageRecognition)({
model: model !== null && model !== void 0 ? model : undefined,
sliceLength: 500,
singleRecognitionMode: false,
}), prediction = _h.prediction,
    loading = _h.loading,
    error = _h.error,
    recording = _h.recording,
    startRecognition = _h.startRecognition,
    stopRecognition = _h.stopRecognition;
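// Assumption: sliceLength controls how often captured video is chunked and sent
// for recognition (presumably in milliseconds), and singleRecognitionMode: false
// keeps streaming predictions rather than stopping after the first result. Both
// readings are inferred from the option names, not from SDK documentation.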
// Forward each new prediction to the caller, skipping the initial empty value.
(0, react_1.useEffect)(function () {
    if (onResult && prediction != null) {
        onResult(prediction);
    }
}, [prediction, onResult]);
// Derive the current interpretation from the received predictions.
var interpretation = prediction === null || prediction === void 0 ? void 0 : prediction.prediction
    .filter(function (x) { return x.confidence > confidenceThreshold; })
    .map(function (x) { return x.prediction; })
    .join(" ");
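// Note: confidences are compared against Math.log(0.5) by default, which suggests
// the SDK reports log-probabilities; the default keeps predictions whose probability
// exceeds 50%. This is inferred from the default value, not from documentation.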
return ((0, jsx_runtime_1.jsxs)("div", __assign({ className: containerClassName }, { children: [
    (0, jsx_runtime_1.jsx)("video", { ref: previewRef, autoPlay: true, playsInline: true, muted: true, className: cameraClassName }),
    interpretation && ((0, jsx_runtime_1.jsx)("p", __assign({ className: interpretationClassName }, { children: interpretation }))),
    error && ((0, jsx_runtime_1.jsx)("p", __assign({ className: "mt-2 text-red-500" }, { children: error.message }))),
    loading && ((0, jsx_runtime_1.jsx)("div", { className: loadingClassName })),
    !loading && (recording
        ? ((0, jsx_runtime_1.jsx)("button", __assign({ className: buttonClassName, onClick: stopRecognition }, { children: "Stop Recording" })))
        : ((0, jsx_runtime_1.jsx)("button", __assign({ className: buttonClassName, onClick: startRecognition }, { children: "Start Recording" }))))
] })));
};
exports.SignRecognition = SignRecognition;
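// Usage sketch (illustrative, not from the SDK docs): render the component in any
// React tree after completing whatever API-key/auth setup @sign-speak/react-sdk
// requires. The class names below are arbitrary Tailwind-style examples.
//
//   <SignRecognition
//     onResult={function (prediction) { console.log(prediction); }}
//     containerClassName="flex flex-col items-center gap-2"
//     cameraClassName="w-96 rounded"
//     buttonClassName="px-4 py-2 rounded bg-blue-600 text-white"
//     loadingClassName="animate-pulse"
//   />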