@sign-speak/react-sdk
Unlock Sign Language Recognition, Avatar, and Speech Recognition.
"use strict";
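// Helper emitted by the TypeScript compiler: shallow-merges own enumerable
// properties, standing in for the object-spread operator in downleveled code.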
var __assign = (this && this.__assign) || function () {
    __assign = Object.assign || function (t) {
        for (var s, i = 1, n = arguments.length; i < n; i++) {
            s = arguments[i];
            for (var p in s)
                if (Object.prototype.hasOwnProperty.call(s, p))
                    t[p] = s[p];
        }
        return t;
    };
    return __assign.apply(this, arguments);
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.SignProduction = void 0;
var jsx_runtime_1 = require("react/jsx-runtime");
var react_1 = require("react");
var react_sdk_1 = require("@sign-speak/react-sdk");
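// <SignProduction> renders English text as a sign-language video: it requests
// a video blob from the SDK's useSignProduction hook and plays it back,
// showing a styleable placeholder element while the request is in flight.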
var SignProduction = function (_a) {
    var text = _a.text, model = _a.model,
        _b = _a.videoContainerClassName, videoContainerClassName = _b === void 0 ? "" : _b,
        _c = _a.videoClassName, videoClassName = _c === void 0 ? "" : _c,
        _d = _a.loadingClassName, loadingClassName = _d === void 0 ? "" : _d;
    var _e = (0, react_sdk_1.useSignProduction)(), triggerProduction = _e.triggerProduction, loading = _e.loading, blob = _e.blob;
    // Object URL for the produced video blob, consumed by the <video> element below.
    var _f = (0, react_1.useState)(), blobURL = _f[0], setBlobURL = _f[1];
    (0, react_1.useEffect)(function () {
        if (blob) {
            var url = URL.createObjectURL(blob);
            setBlobURL(url);
            // Revoke the object URL when a new blob arrives or the component
            // unmounts, so the previous video's memory can be reclaimed.
            return function () { URL.revokeObjectURL(url); };
        }
    }, [blob]);
    // When the text or requested model changes, produce a new sign-language
    // video for the full string.
    (0, react_1.useEffect)(function () {
        triggerProduction({
            english: text,
        }, {
            model: model !== null && model !== void 0 ? model : undefined
        });
    }, [text, model]);
    // Show the loading placeholder until a video blob is available, then
    // autoplay the produced video muted.
    return ((0, jsx_runtime_1.jsx)("div", __assign({ className: videoContainerClassName }, { children: blob == null || loading
            ? (0, jsx_runtime_1.jsx)("div", { className: loadingClassName })
            : (0, jsx_runtime_1.jsx)("video", { src: blobURL, muted: true, autoPlay: true, className: videoClassName }) })));
};
exports.SignProduction = SignProduction;
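For context, a minimal usage sketch follows. It assumes SignProduction is re-exported from the package root (as this file's export suggests), that a bundler with JSX support is in place, and that any SDK-level setup such as API-key configuration has already been done; the element id and class names are illustrative placeholders.

import { createRoot } from "react-dom/client";
import { SignProduction } from "@sign-speak/react-sdk";

// Render an English sentence as a sign-language video. The optional "model"
// prop is omitted here, so the hook's default model is used.
createRoot(document.getElementById("root")).render(
    <SignProduction
        text="Hello, how are you?"
        videoContainerClassName="sign-video-container"
        videoClassName="sign-video"
        loadingClassName="sign-video-loading"
    />
);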