react-dictate-button
Version:
A button to start dictation using the Web Speech API, with an easy-to-understand event lifecycle.
557 lines (544 loc) • 16.7 kB
JavaScript
// src/Composer.tsx
import React, { useCallback, useEffect as useEffect2, useMemo, useRef as useRef2, useState } from "react";
import { useRefFrom } from "use-ref-from";
// src/Context.ts
import { createContext } from "react";
// Default dictation context used before a <Composer> provider mounts:
// nothing to abort, readyState 0 (idle), and speech assumed supported.
var initialDictateContext = Object.freeze({
  abortable: false,
  readyState: 0,
  supported: true
});
var Context = createContext(initialDictateContext);
var Context_default = Context;
// src/private/assert.ts
// Development-time invariant check. In this production bundle the guard has
// been compiled down to `if (false)`, so the body is dead code and assert()
// never throws, regardless of `truthy`.
// NOTE(review): restoring `if (!truthy)` would make the lifecycle asserts in
// Composer live; audit those call sites first — e.g. emitDictate asserts the
// state is NOT "started", which looks inconsistent with the other asserts.
// Verify against the TypeScript source before enabling.
function assert(truthy) {
  if (false) {
    throw new Error("Assertion failed.");
  }
}
// src/usePrevious.ts
import { useEffect, useRef } from "react";
// Returns the `value` seen on the previous render (undefined on the first).
// The ref is written inside an effect — i.e. after the render has committed —
// so during any given render, `ref.current` still holds the prior value.
function usePrevious(value) {
  const ref = useRef();
  useEffect(() => {
    ref.current = value;
  });
  return ref.current;
}
// src/vendorPrefix.ts
function vendorPrefix(name) {
if (typeof window !== "undefined") {
return name in window && typeof window[name] !== "undefined" ? (
// eslint-disable-next-line @typescript-eslint/no-explicit-any
window[name]
) : (
// eslint-disable-next-line @typescript-eslint/no-explicit-any
window[`webkit${name}`]
);
}
return;
}
// src/Composer.tsx
// True when `recognition` is an object exposing a callable abort() method,
// i.e. the ongoing recognition can be stopped programmatically.
function recognitionAbortable(recognition) {
  if (!recognition || typeof recognition !== "object") {
    return false;
  }
  return "abort" in recognition && typeof recognition.abort === "function";
}
// Headless dictation controller. While `started` is truthy it owns one live
// SpeechRecognition instance, forwards its events to the callback props, and
// publishes { abortable, readyState, supported } via Context for the hooks
// and components below. `children` may be a render function receiving that
// context object.
var Composer = ({
  children,
  continuous,
  extra,
  grammar,
  lang,
  onDictate,
  onEnd,
  onError,
  onProgress,
  onRawEvent,
  onStart,
  // Default to the browser's (possibly webkit-prefixed) implementations,
  // gated on navigator.mediaDevices.getUserMedia being present.
  speechGrammarList = navigator.mediaDevices && // @ts-expect-error navigator.mediaDevices.getUserMedia may not be defined in older browsers.
  navigator.mediaDevices.getUserMedia && vendorPrefix("SpeechGrammarList"),
  speechRecognition = navigator.mediaDevices && // @ts-expect-error navigator.mediaDevices.getUserMedia may not be defined in older browsers.
  navigator.mediaDevices.getUserMedia && vendorPrefix("SpeechRecognition"),
  started
}) => {
  // readyState: 0 = idle, 1 = start requested, 2 = audio started, 3 = audio ended.
  const [readyState, setReadyState] = useState(0);
  // Refs mirroring props so the stable callbacks below always read fresh values.
  const continuousRef = useRefFrom(continuous);
  const extraRef = useRefFrom(extra);
  const grammarRef = useRefFrom(grammar);
  const langRef = useRefFrom(lang);
  // Latched when the browser reports "not-allowed"; makes `supported` false.
  const notAllowedRef = useRef2(false);
  const onDictateRef = useRefFrom(onDictate);
  const onEndRef = useRefFrom(onEnd);
  const onErrorRef = useRefFrom(onError);
  const onProgressRef = useRefFrom(onProgress);
  const onRawEventRef = useRefFrom(onRawEvent);
  const onStartRef = useRefFrom(onStart);
  const prevSpeechRecognition = usePrevious(speechRecognition);
  // The live SpeechRecognition instance; undefined when not dictating.
  const recognitionRef = useRef2();
  const speechGrammarListRef = useRefFrom(speechGrammarList);
  const speechRecognitionClassRef = useRefFrom(speechRecognition);
  // Lifecycle state machine: "idle" | "started" | "has progress" | "has result" | "error".
  const stateRef = useRef2("idle");
  const unmountedRef = useRef2(false);
  // Swapping in a different recognition implementation resets the
  // permission-denied latch (done during render, on a ref — no re-render).
  if (prevSpeechRecognition !== speechRecognition) {
    notAllowedRef.current = false;
  }
  // Emit a "dictate" (final result) event, unless already unmounted.
  const emitDictate = useCallback(
    (event) => {
      var _a;
      if (unmountedRef.current) {
        return;
      }
      // NOTE(review): assert() is compiled out in this bundle (if (false)), so
      // this check is inert; the condition ("not started") also looks inverted
      // relative to the other lifecycle asserts — verify against the TS source.
      assert(stateRef.current !== "started");
      (_a = onDictateRef.current) == null ? void 0 : _a.call(onDictateRef, event);
      stateRef.current = "has result";
    },
    [onDictateRef, stateRef]
  );
  // Emit the synthetic "end" event. If only interim progress was seen, first
  // flush an empty "dictate" so consumers always see a terminal result event.
  // Returns the machine to "idle" unless it is in "error".
  // (emitDictate is identity-stable — its deps are refs — so it is safe to
  // omit from this dependency list.)
  const emitEnd = useCallback(() => {
    var _a;
    if (unmountedRef.current) {
      return;
    }
    if (stateRef.current === "has progress") {
      emitDictate({ type: "dictate" });
      stateRef.current = "has result";
    }
    assert(stateRef.current === "started" || stateRef.current === "has result" || stateRef.current === "error");
    (_a = onEndRef.current) == null ? void 0 : _a.call(onEndRef, new Event("end"));
    if (stateRef.current !== "error") {
      stateRef.current = "idle";
    }
  }, [onEndRef, stateRef]);
  // Forward an error event and enter the "error" state.
  const emitError = useCallback(
    (event) => {
      var _a;
      if (unmountedRef.current) {
        return;
      }
      (_a = onErrorRef.current) == null ? void 0 : _a.call(onErrorRef, event);
      stateRef.current = "error";
    },
    [onErrorRef, stateRef]
  );
  // Forward an interim "progress" event and enter "has progress".
  const emitProgress = useCallback(
    (event) => {
      var _a;
      if (unmountedRef.current) {
        return;
      }
      assert(
        stateRef.current === "started" || stateRef.current === "has progress" || stateRef.current === "has result"
      );
      (_a = onProgressRef.current) == null ? void 0 : _a.call(onProgressRef, event);
      stateRef.current = "has progress";
    },
    [onProgressRef, stateRef]
  );
  // Emit the synthetic "start" event and enter "started".
  const emitStart = useCallback(() => {
    var _a;
    if (unmountedRef.current) {
      return;
    }
    assert(stateRef.current === "idle");
    (_a = onStartRef.current) == null ? void 0 : _a.call(onStartRef, new Event("start"));
    stateRef.current = "started";
  }, [onStartRef, stateRef]);
  // All handlers below ignore events from any recognition other than the
  // current one (stale instances may still fire after being replaced).
  // "audioend" -> readyState 3 (winding down).
  const handleAudioEnd = useCallback(
    ({ target }) => target === recognitionRef.current && setReadyState(3),
    [recognitionRef, setReadyState]
  );
  // "audiostart" -> readyState 2 and a first (empty) progress event that
  // tells consumers whether this recognition can be aborted.
  const handleAudioStart = useCallback(
    ({ target }) => {
      if (target !== recognitionRef.current) {
        return;
      }
      setReadyState(2);
      emitProgress({ abortable: recognitionAbortable(target), type: "progress" });
    },
    [emitProgress, recognitionRef, setReadyState]
  );
  // "end" -> emit end, back to idle, drop the instance.
  const handleEnd = useCallback(
    ({ target }) => {
      if (target !== recognitionRef.current) {
        return;
      }
      emitEnd();
      setReadyState(0);
      recognitionRef.current = void 0;
    },
    [emitEnd, recognitionRef, setReadyState]
  );
  // "error" -> drop the instance, latch "not-allowed" denials, then emit
  // error followed by end (errors still terminate the session).
  const handleError = useCallback(
    (event) => {
      if (event.target !== recognitionRef.current) {
        return;
      }
      recognitionRef.current = void 0;
      if (event.error === "not-allowed") {
        notAllowedRef.current = true;
      }
      setReadyState(0);
      emitError(event);
      emitEnd();
    },
    [emitEnd, emitError, notAllowedRef, recognitionRef, setReadyState]
  );
  // Pass every raw SpeechRecognition event through to onRawEvent, unfiltered.
  const handleRawEvent = useCallback(
    (event) => {
      var _a;
      if (event.target !== recognitionRef.current) {
        return;
      }
      (_a = onRawEventRef.current) == null ? void 0 : _a.call(onRawEventRef, event);
    },
    [onRawEventRef, recognitionRef]
  );
  // "result": a final result becomes a "dictate" event carrying the top
  // alternative; otherwise all non-final results are forwarded as "progress".
  const handleResult = useCallback(
    ({ resultIndex, results: rawResults, target }) => {
      if (target !== recognitionRef.current) {
        return;
      }
      if (rawResults.length) {
        // Some engines omit resultIndex; fall back to the last result.
        const rawResult = rawResults[resultIndex ?? rawResults.length - 1];
        if (rawResult == null ? void 0 : rawResult.isFinal) {
          const alt = rawResult[0];
          alt && emitDictate({
            result: {
              confidence: alt.confidence,
              transcript: alt.transcript
            },
            type: "dictate"
          });
        } else {
          emitProgress({
            abortable: recognitionAbortable(target),
            results: Object.freeze(
              Array.from(rawResults).filter((result) => !result.isFinal).map((alts) => {
                const firstAlt = alts[0];
                return {
                  confidence: (firstAlt == null ? void 0 : firstAlt.confidence) || 0,
                  transcript: (firstAlt == null ? void 0 : firstAlt.transcript) || ""
                };
              })
            ),
            type: "progress"
          });
        }
      }
    },
    [emitDictate, emitProgress, recognitionRef]
  );
  // "start" -> emit start and readyState 1.
  const handleStart = useCallback(
    ({ target }) => {
      if (target === recognitionRef.current) {
        emitStart();
        setReadyState(1);
      }
    },
    [emitStart, recognitionRef, setReadyState]
  );
  // Create, configure and start a recognition whenever `started` becomes
  // truthy; the cleanup aborts it when `started` flips off or deps change.
  useEffect2(() => {
    if (!started) {
      return;
    }
    if (!speechRecognitionClassRef.current || notAllowedRef.current) {
      throw new Error("Speech recognition is not supported");
    } else if (recognitionRef.current) {
      throw new Error("Speech recognition already started, cannot start a new one.");
    }
    // Grammars are only built when both a grammar string and a
    // SpeechGrammarList implementation are available.
    const grammars = speechGrammarListRef.current && grammarRef.current && new speechGrammarListRef.current();
    const recognition = recognitionRef.current = new speechRecognitionClassRef.current();
    if (grammars) {
      grammars.addFromString(grammarRef.current, 1);
      recognition.grammars = grammars;
    }
    if (typeof langRef.current !== "undefined") {
      recognition.lang = langRef.current;
    }
    recognition.continuous = !!continuousRef.current;
    recognition.interimResults = true;
    // Lifecycle handlers (state machine + callbacks).
    recognition.addEventListener("audioend", handleAudioEnd);
    recognition.addEventListener("audiostart", handleAudioStart);
    recognition.addEventListener("end", handleEnd);
    recognition.addEventListener("error", handleError);
    recognition.addEventListener("result", handleResult);
    recognition.addEventListener("start", handleStart);
    // Raw passthrough for every event type, for onRawEvent consumers.
    recognition.addEventListener("nomatch", handleRawEvent);
    recognition.addEventListener("audioend", handleRawEvent);
    recognition.addEventListener("audiostart", handleRawEvent);
    recognition.addEventListener("end", handleRawEvent);
    recognition.addEventListener("error", handleRawEvent);
    recognition.addEventListener("result", handleRawEvent);
    recognition.addEventListener("soundend", handleRawEvent);
    recognition.addEventListener("soundstart", handleRawEvent);
    recognition.addEventListener("speechend", handleRawEvent);
    recognition.addEventListener("speechstart", handleRawEvent);
    recognition.addEventListener("start", handleRawEvent);
    // Copy caller-supplied extras onto the instance, guarding against
    // prototype pollution via crafted keys.
    const { current: extra2 } = extraRef;
    extra2 && Object.entries(extra2).forEach(([key, value]) => {
      if (key !== "constructor" && key !== "prototype" && key !== "__proto__") {
        recognition[key] = value;
      }
    });
    recognition.start();
    return () => {
      if (recognitionAbortable(recognition)) {
        recognition.abort();
      } else if (!unmountedRef.current) {
        // NOTE(review): message says "react-dictate-state" but the package is
        // react-dictate-button — probable typo (runtime string, left as-is).
        console.warn("react-dictate-state: Cannot stop because SpeechRecognition does not have abort() function.");
      }
    };
  }, [
    continuousRef,
    emitEnd,
    extraRef,
    grammarRef,
    handleAudioEnd,
    handleAudioStart,
    handleEnd,
    handleError,
    handleRawEvent,
    handleResult,
    handleStart,
    langRef,
    notAllowedRef,
    recognitionRef,
    speechGrammarListRef,
    speechRecognitionClassRef,
    started,
    stateRef
  ]);
  // Mark unmount so in-flight handlers become no-ops.
  useEffect2(
    () => () => {
      unmountedRef.current = true;
    },
    []
  );
  // Abort is only offered while audio is actually running (readyState 2).
  const abortable = recognitionAbortable(recognitionRef.current) && readyState === 2;
  const supported = !!speechRecognition && !notAllowedRef.current;
  const context = useMemo(
    () => Object.freeze({
      abortable,
      readyState,
      supported
    }),
    [abortable, readyState, supported]
  );
  return /* @__PURE__ */ React.createElement(Context_default.Provider, { value: context }, /* @__PURE__ */ React.createElement(Context_default.Consumer, null, (context2) => typeof children === "function" ? children(context2) : children));
};
var Composer_default = Composer;
// src/DictateButton.tsx
import React2, { useCallback as useCallback2, useState as useState2 } from "react";
import { useRefFrom as useRefFrom2 } from "use-ref-from";
// src/hooks/internal/useDictateContext.ts
import { useContext } from "react";
// Internal hook: read the nearest dictation context
// ({ abortable, readyState, supported }) provided by <Composer>.
function useDictateContext() {
  const dictateContext = useContext(Context_default);
  return dictateContext;
}
// src/hooks/useReadyState.ts
// Public hook: the current ready state, as a frozen one-element tuple.
function useReadyState() {
  const dictateContext = useDictateContext();
  return Object.freeze([dictateContext.readyState]);
}
// src/hooks/useSupported.ts
// Public hook: whether speech recognition is supported (and not denied),
// as a frozen one-element tuple.
function useSupported() {
  const dictateContext = useDictateContext();
  return Object.freeze([dictateContext.supported]);
}
// src/DictateButton.tsx
// Presentational <button> for DictateButton. Disabled while the recognition
// is starting (1) or stopping (3), when speech is unsupported, or when the
// caller disabled it. `children` may be a render function receiving { readyState }.
var DictateButtonCore = ({ children, className, disabled, onClick }) => {
  const [readyState] = useReadyState();
  const [supported] = useSupported();
  const buttonDisabled = readyState === 1 || readyState === 3 || !supported || disabled;
  const content = typeof children === "function" ? children({ readyState }) : children;
  return React2.createElement(
    "button",
    {
      className,
      disabled: buttonDisabled,
      onClick,
      type: "button"
    },
    content
  );
};
// <button> flavor of the dictation control: clicking toggles dictation, and
// the button resets itself automatically when recognition ends or errors.
var DictateButton = ({
  children,
  className,
  continuous,
  disabled,
  extra,
  grammar,
  lang,
  onClick,
  onDictate,
  onEnd,
  onError,
  onProgress,
  onRawEvent,
  onStart,
  speechGrammarList,
  speechRecognition
}) => {
  const [started, setStarted] = useState2(false);
  const onClickRef = useRefFrom2(onClick);
  const onEndRef = useRefFrom2(onEnd);
  const onErrorRef = useRefFrom2(onError);
  const onStartRef = useRefFrom2(onStart);
  // Toggle dictation on click, unless the caller's handler prevented default.
  const handleClick = useCallback2(
    (event) => {
      onClickRef.current?.(event);
      if (!event.isDefaultPrevented()) {
        setStarted((wasStarted) => !wasStarted);
      }
    },
    [onClickRef, setStarted]
  );
  // Stop on "end" and forward the event.
  const handleEnd = useCallback2(
    (event) => {
      setStarted(false);
      onEndRef.current?.(event);
    },
    [onEndRef, setStarted]
  );
  // Stop on error and forward the event.
  const handleError = useCallback2(
    (event) => {
      setStarted(false);
      onErrorRef.current?.(event);
    },
    [onErrorRef, setStarted]
  );
  // Reflect "start" in local state and forward the event.
  const handleStart = useCallback2(
    (event) => {
      setStarted(true);
      onStartRef.current?.(event);
    },
    [onStartRef, setStarted]
  );
  return React2.createElement(
    Composer_default,
    {
      continuous,
      extra,
      grammar,
      lang,
      onDictate,
      onEnd: handleEnd,
      onError: handleError,
      onProgress,
      onRawEvent,
      onStart: handleStart,
      speechGrammarList,
      speechRecognition,
      // Disabling the button also stops any dictation in flight.
      started: started && !disabled
    },
    React2.createElement(DictateButtonCore, { className, disabled, onClick: handleClick }, children)
  );
};
var DictateButton_default = DictateButton;
// src/DictateCheckbox.tsx
import React3, { useCallback as useCallback3, useState as useState3 } from "react";
import { useRefFrom as useRefFrom3 } from "use-ref-from";
// Presentational checkbox for DictateCheckbox, wrapped in a <label>.
// Disabled while starting (1) or stopping (3), when speech is unsupported,
// or when the caller disabled it. `children` may be a render function
// receiving { readyState }.
var DictateCheckboxCore = ({ children, className, disabled, onChange, started }) => {
  const [readyState] = useReadyState();
  const [supported] = useSupported();
  const checkboxDisabled = readyState === 1 || readyState === 3 || !supported || disabled;
  const content = typeof children === "function" ? children({ readyState }) : children;
  return React3.createElement(
    "label",
    null,
    React3.createElement("input", {
      checked: started,
      className,
      disabled: checkboxDisabled,
      onChange,
      type: "checkbox"
    }),
    content
  );
};
// Checkbox flavor of the dictation control: checking starts dictation,
// unchecking stops it, and the box unchecks itself on end or error.
var DictateCheckbox = ({
  children,
  className,
  continuous,
  disabled,
  extra,
  grammar,
  lang,
  onDictate,
  onEnd,
  onError,
  onProgress,
  onRawEvent,
  onStart,
  speechGrammarList,
  speechRecognition
}) => {
  const [started, setStarted] = useState3(false);
  const onEndRef = useRefFrom3(onEnd);
  const onErrorRef = useRefFrom3(onError);
  const onStartRef = useRefFrom3(onStart);
  // Mirror the checkbox into local state.
  const handleChange = useCallback3(
    (event) => setStarted(event.currentTarget.checked),
    [setStarted]
  );
  // Uncheck on "end" and forward the event.
  const handleEnd = useCallback3(
    (event) => {
      setStarted(false);
      onEndRef.current?.(event);
    },
    [onEndRef, setStarted]
  );
  // Uncheck on error and forward the event.
  const handleError = useCallback3(
    (event) => {
      setStarted(false);
      onErrorRef.current?.(event);
    },
    [onErrorRef, setStarted]
  );
  // Reflect "start" in local state and forward the event.
  const handleStart = useCallback3(
    (event) => {
      setStarted(true);
      onStartRef.current?.(event);
    },
    [onStartRef, setStarted]
  );
  return React3.createElement(
    Composer_default,
    {
      continuous,
      extra,
      grammar,
      lang,
      onDictate,
      onEnd: handleEnd,
      onError: handleError,
      onProgress,
      onRawEvent,
      onStart: handleStart,
      speechGrammarList,
      speechRecognition,
      // Disabling the checkbox also stops any dictation in flight.
      started: started && !disabled
    },
    React3.createElement(DictateCheckboxCore, { className, disabled, onChange: handleChange, started }, children)
  );
};
var DictateCheckbox_default = DictateCheckbox;
// src/hooks/useAbortable.ts
// Public hook: whether the ongoing recognition can be aborted, as a frozen
// one-element tuple.
function useAbortable() {
  const dictateContext = useDictateContext();
  return Object.freeze([dictateContext.abortable]);
}
// src/index.ts
// Public API surface (from src/index.ts). DictateButton doubles as the
// default export.
var Context2 = Context_default;
var DictateButton_ = DictateButton_default;
var src_default = DictateButton_;
export {
  Composer_default as Composer,
  Context2 as Context,
  DictateButton_default as DictateButton,
  DictateCheckbox_default as DictateCheckbox,
  src_default as default,
  useAbortable,
  useReadyState,
  useSupported
};
//# sourceMappingURL=react-dictate-button.mjs.map