@sign-speak/react-sdk

Unlock Sign Language Recognition, Avatar, and Speech Recognition.

"use strict"; Object.defineProperty(exports, "__esModule", { value: true }); exports.SpeechProduction = void 0; var react_1 = require("react"); var ttsHook_1 = require("../hooks/ttsHook"); var SpeechProduction = function (_a) { var _b = _a.model, model = _b === void 0 ? "MALE" : _b, text = _a.text, _c = _a.play, play = _c === void 0 ? true : _c, _d = _a.onLoaded, onLoaded = _d === void 0 ? function () { } : _d, _e = _a.onPlaying, onPlaying = _e === void 0 ? function () { } : _e, _f = _a.onStopped, onStopped = _f === void 0 ? function () { } : _f; var _g = (0, ttsHook_1.useSpeechProduction)(), blob = _g.blob, triggerProduction = _g.triggerProduction; // Trigger speech production when text or model changes. (0, react_1.useEffect)(function () { triggerProduction({ english: text }, { model: model }) .then(function () { onLoaded(); }) .catch(function (err) { console.error("Error producing speech:", err); }); }, [text, model]); // When blob is ready and play is true, create an audio element to play it. (0, react_1.useEffect)(function () { if (play && blob) { onPlaying(); var audioUrl_1 = URL.createObjectURL(blob); var audioElement = new Audio(audioUrl_1); audioElement.play(); audioElement.onended = function () { URL.revokeObjectURL(audioUrl_1); onStopped(); }; } }, [blob, play]); return null; }; exports.SpeechProduction = SpeechProduction;