// react-dictate-button — CommonJS bundle
// A button to start dictation using the Web Speech API, with an
// easy-to-understand event lifecycle.
;
// esbuild interop helpers: cached aliases of the Object statics used by the
// CommonJS/ESM shims (__export, __copyProps, __toESM, __toCommonJS) below.
var __create = Object.create;
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __getProtoOf = Object.getPrototypeOf;
var __hasOwnProp = Object.prototype.hasOwnProperty;
// Defines every entry of `all` on `target` as an enumerable lazy getter, so
// export values are not evaluated until first access.
var __export = (target, all) => {
  for (var key in all) {
    __defProp(target, key, { enumerable: true, get: all[key] });
  }
};
// Copies all own properties of `from` onto `to` as lazy getters, skipping keys
// already present on `to` and the single `except` key. Returns `to`.
var __copyProps = (to, from, except, desc) => {
  if (from && typeof from === "object" || typeof from === "function") {
    for (const key of __getOwnPropNames(from)) {
      if (__hasOwnProp.call(to, key) || key === except) {
        continue;
      }
      desc = __getOwnPropDesc(from, key);
      // Preserve enumerability of the source descriptor (default true).
      __defProp(to, key, { enumerable: !desc || desc.enumerable, get: () => from[key] });
    }
  }
  return to;
};
// Wraps a CommonJS module so it can be consumed like an ES module.
// `target` is an internal temp; callers pass only (mod) or (mod, isNodeMode).
var __toESM = (mod, isNodeMode, target) => {
  target = mod != null ? __create(__getProtoOf(mod)) : {};
  // If the importer is in node compatibility mode or this is not an ESM
  // file that has been converted to a CommonJS file using a Babel-
  // compatible transform (i.e. "__esModule" has not been set), then set
  // "default" to the CommonJS "module.exports" for node compatibility.
  if (isNodeMode || !mod || !mod.__esModule) {
    __defProp(target, "default", { value: mod, enumerable: true });
  }
  return __copyProps(target, mod);
};
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
// src/index.ts
// Public export map. Each entry is a thunk, so the getters installed by
// __export defer evaluation until first access — the referenced bindings
// (Composer_default, Context2, ...) are defined later in this file.
var src_exports = {};
__export(src_exports, {
  Composer: () => Composer_default,
  Context: () => Context2,
  DictateButton: () => DictateButton_default,
  DictateCheckbox: () => DictateCheckbox_default,
  default: () => src_default,
  useAbortable: () => useAbortable,
  useReadyState: () => useReadyState,
  useSupported: () => useSupported
});
module.exports = __toCommonJS(src_exports);
// src/Composer.tsx
var import_react3 = __toESM(require("react"));
var import_use_ref_from = require("use-ref-from");
// src/Context.ts
var import_react = require("react");
// React context published by <Composer>. This frozen object is the default
// used when no <Composer> ancestor exists: not abortable, readyState 0 (idle),
// and optimistically marked supported.
var Context = (0, import_react.createContext)(
  Object.freeze({
    abortable: false,
    readyState: 0,
    supported: true
  })
);
var Context_default = Context;
// src/private/assert.ts
// Development-only invariant check: throws when `truthy` is falsy, but only if
// a bundler injected a truthy IS_DEVELOPMENT global. No-op in production.
function assert(truthy) {
  const isDevelopment = typeof IS_DEVELOPMENT !== "undefined" && IS_DEVELOPMENT;
  if (isDevelopment && !truthy) {
    throw new Error("Assertion failed.");
  }
}
// src/usePrevious.ts
var import_react2 = require("react");
// Returns the `value` from the previous render (undefined on the first one).
// The ref is updated inside an effect, i.e. after the render has committed.
function usePrevious(value) {
  const previousRef = (0, import_react2.useRef)();
  (0, import_react2.useEffect)(() => {
    previousRef.current = value;
  });
  return previousRef.current;
}
// src/vendorPrefix.ts
// Looks up a global by name, falling back to its WebKit-prefixed variant
// (e.g. "SpeechRecognition" -> window.webkitSpeechRecognition for Safari).
// Returns undefined outside the browser (SSR) or when neither exists.
function vendorPrefix(name) {
  if (typeof window === "undefined") {
    return;
  }
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  const unprefixed = window[name];
  if (typeof unprefixed !== "undefined") {
    return unprefixed;
  }
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  return window[`webkit${name}`];
}
// src/Composer.tsx
// True when `recognition` is an object exposing a callable abort() method —
// i.e. the active session can be stopped programmatically.
function recognitionAbortable(recognition) {
  if (!recognition || typeof recognition !== "object") {
    return false;
  }
  return "abort" in recognition && typeof recognition.abort === "function";
}
// <Composer> owns a single SpeechRecognition session and publishes
// { abortable, readyState, supported } through Context to its children.
// Normalized event lifecycle: onStart -> onProgress* -> onDictate -> onEnd
// (onError fires instead of onDictate when recognition fails).
// Fix in this revision: the cleanup console.warn said "react-dictate-state"
// instead of this package's name, "react-dictate-button".
var Composer = ({
  children,
  continuous,
  extra,
  grammar,
  lang,
  onDictate,
  onEnd,
  onError,
  onProgress,
  onRawEvent,
  onStart,
  // Default to the (possibly WebKit-prefixed) Web Speech classes, but only when
  // microphone capture looks available via mediaDevices.getUserMedia.
  speechGrammarList = navigator.mediaDevices && // @ts-expect-error navigator.mediaDevices.getUserMedia may not be defined in older browsers.
  navigator.mediaDevices.getUserMedia && vendorPrefix("SpeechGrammarList"),
  speechRecognition = navigator.mediaDevices && // @ts-expect-error navigator.mediaDevices.getUserMedia may not be defined in older browsers.
  navigator.mediaDevices.getUserMedia && vendorPrefix("SpeechRecognition"),
  started
}) => {
  // readyState: 0 = idle, 1 = start requested, 2 = audio capturing, 3 = stopping.
  const [readyState, setReadyState] = (0, import_react3.useState)(0);
  // Props are mirrored into refs (use-ref-from) so the memoized emitters and
  // handlers below stay referentially stable across renders.
  const continuousRef = (0, import_use_ref_from.useRefFrom)(continuous);
  const extraRef = (0, import_use_ref_from.useRefFrom)(extra);
  const grammarRef = (0, import_use_ref_from.useRefFrom)(grammar);
  const langRef = (0, import_use_ref_from.useRefFrom)(lang);
  // Latched when the browser reports "not-allowed" (permission denied);
  // makes `supported` false until a new recognition class is supplied.
  const notAllowedRef = (0, import_react3.useRef)(false);
  const onDictateRef = (0, import_use_ref_from.useRefFrom)(onDictate);
  const onEndRef = (0, import_use_ref_from.useRefFrom)(onEnd);
  const onErrorRef = (0, import_use_ref_from.useRefFrom)(onError);
  const onProgressRef = (0, import_use_ref_from.useRefFrom)(onProgress);
  const onRawEventRef = (0, import_use_ref_from.useRefFrom)(onRawEvent);
  const onStartRef = (0, import_use_ref_from.useRefFrom)(onStart);
  const prevSpeechRecognition = usePrevious(speechRecognition);
  // The live SpeechRecognition instance; undefined while no session is active.
  const recognitionRef = (0, import_react3.useRef)();
  const speechGrammarListRef = (0, import_use_ref_from.useRefFrom)(speechGrammarList);
  const speechRecognitionClassRef = (0, import_use_ref_from.useRefFrom)(speechRecognition);
  // Lifecycle state machine: "idle" | "started" | "has progress" | "has result" | "error".
  const stateRef = (0, import_react3.useRef)("idle");
  const unmountedRef = (0, import_react3.useRef)(false);
  // Swapping in a different recognition implementation resets the
  // permission-denied latch.
  if (prevSpeechRecognition !== speechRecognition) {
    notAllowedRef.current = false;
  }
  // Emits the final "dictate" result to the caller.
  const emitDictate = (0, import_react3.useCallback)(
    (event) => {
      if (unmountedRef.current) {
        return;
      }
      // NOTE(review): this asserts the state is NOT "started", which forbids a
      // final result arriving without any interim progress — looks inverted;
      // dev-only (assert is a no-op unless IS_DEVELOPMENT is set). Confirm
      // against upstream src/Composer.tsx.
      assert(stateRef.current !== "started");
      onDictateRef.current?.(event);
      stateRef.current = "has result";
    },
    [onDictateRef, stateRef]
  );
  // Emits "end"; if the session ends with only interim progress, a final empty
  // dictate event is synthesized first so callers always see a terminal result.
  const emitEnd = (0, import_react3.useCallback)(() => {
    if (unmountedRef.current) {
      return;
    }
    if (stateRef.current === "has progress") {
      emitDictate({ type: "dictate" });
      stateRef.current = "has result";
    }
    assert(stateRef.current === "started" || stateRef.current === "has result" || stateRef.current === "error");
    onEndRef.current?.(new Event("end"));
    // An "error" state is sticky until the next session starts.
    if (stateRef.current !== "error") {
      stateRef.current = "idle";
    }
  }, [emitDictate, onEndRef, stateRef]);
  const emitError = (0, import_react3.useCallback)(
    (event) => {
      if (unmountedRef.current) {
        return;
      }
      onErrorRef.current?.(event);
      stateRef.current = "error";
    },
    [onErrorRef, stateRef]
  );
  // Emits interim "progress" updates while the user is speaking.
  const emitProgress = (0, import_react3.useCallback)(
    (event) => {
      if (unmountedRef.current) {
        return;
      }
      assert(
        stateRef.current === "started" || stateRef.current === "has progress" || stateRef.current === "has result"
      );
      onProgressRef.current?.(event);
      stateRef.current = "has progress";
    },
    [onProgressRef, stateRef]
  );
  const emitStart = (0, import_react3.useCallback)(() => {
    if (unmountedRef.current) {
      return;
    }
    assert(stateRef.current === "idle");
    onStartRef.current?.(new Event("start"));
    stateRef.current = "started";
  }, [onStartRef, stateRef]);
  // Every handler ignores events from stale recognition instances by comparing
  // event.target against the current recognitionRef.
  const handleAudioEnd = (0, import_react3.useCallback)(
    ({ target }) => target === recognitionRef.current && setReadyState(3),
    [recognitionRef, setReadyState]
  );
  const handleAudioStart = (0, import_react3.useCallback)(
    ({ target }) => {
      if (target !== recognitionRef.current) {
        return;
      }
      setReadyState(2);
      // Kick off a progress event early so the caller can render a UI sooner.
      emitProgress({ abortable: recognitionAbortable(target), type: "progress" });
    },
    [emitProgress, recognitionRef, setReadyState]
  );
  const handleEnd = (0, import_react3.useCallback)(
    ({ target }) => {
      if (target !== recognitionRef.current) {
        return;
      }
      emitEnd();
      setReadyState(0);
      recognitionRef.current = void 0;
    },
    [emitEnd, recognitionRef, setReadyState]
  );
  const handleError = (0, import_react3.useCallback)(
    (event) => {
      if (event.target !== recognitionRef.current) {
        return;
      }
      recognitionRef.current = void 0;
      // Permission denied: latch it so `supported` flips to false.
      if (event.error === "not-allowed") {
        notAllowedRef.current = true;
      }
      setReadyState(0);
      emitError(event);
      emitEnd();
    },
    [emitEnd, emitError, notAllowedRef, recognitionRef, setReadyState]
  );
  // Forwards every raw browser event unmodified to onRawEvent.
  const handleRawEvent = (0, import_react3.useCallback)(
    (event) => {
      if (event.target !== recognitionRef.current) {
        return;
      }
      onRawEventRef.current?.(event);
    },
    [onRawEventRef, recognitionRef]
  );
  // Translates SpeechRecognition "result" events into dictate (final) or
  // progress (interim) events with plain, frozen payloads.
  const handleResult = (0, import_react3.useCallback)(
    ({ resultIndex, results: rawResults, target }) => {
      if (target !== recognitionRef.current) {
        return;
      }
      if (rawResults.length) {
        // Some engines omit resultIndex; fall back to the last result.
        const rawResult = rawResults[resultIndex ?? rawResults.length - 1];
        if (rawResult?.isFinal) {
          // Only the top alternative is surfaced.
          const alt = rawResult[0];
          alt && emitDictate({
            result: {
              confidence: alt.confidence,
              transcript: alt.transcript
            },
            type: "dictate"
          });
        } else {
          emitProgress({
            abortable: recognitionAbortable(target),
            results: Object.freeze(
              Array.from(rawResults).filter((result) => !result.isFinal).map((alts) => {
                const firstAlt = alts[0];
                return {
                  confidence: firstAlt?.confidence || 0,
                  transcript: firstAlt?.transcript || ""
                };
              })
            ),
            type: "progress"
          });
        }
      }
    },
    [emitDictate, emitProgress, recognitionRef]
  );
  const handleStart = (0, import_react3.useCallback)(
    ({ target }) => {
      if (target === recognitionRef.current) {
        emitStart();
        setReadyState(1);
      }
    },
    [emitStart, recognitionRef, setReadyState]
  );
  // Creates/configures a recognition session while `started` is truthy; the
  // cleanup aborts the session when `started` flips off or deps change.
  (0, import_react3.useEffect)(() => {
    if (!started) {
      return;
    }
    if (!speechRecognitionClassRef.current || notAllowedRef.current) {
      throw new Error("Speech recognition is not supported");
    } else if (recognitionRef.current) {
      throw new Error("Speech recognition already started, cannot start a new one.");
    }
    // Grammar list is only built when both a class and a grammar string exist.
    const grammars = speechGrammarListRef.current && grammarRef.current && new speechGrammarListRef.current();
    const recognition = recognitionRef.current = new speechRecognitionClassRef.current();
    if (grammars) {
      grammars.addFromString(grammarRef.current, 1);
      recognition.grammars = grammars;
    }
    if (typeof langRef.current !== "undefined") {
      recognition.lang = langRef.current;
    }
    recognition.continuous = !!continuousRef.current;
    // Interim results are required for the "progress" lifecycle events.
    recognition.interimResults = true;
    recognition.addEventListener("audioend", handleAudioEnd);
    recognition.addEventListener("audiostart", handleAudioStart);
    recognition.addEventListener("end", handleEnd);
    recognition.addEventListener("error", handleError);
    recognition.addEventListener("result", handleResult);
    recognition.addEventListener("start", handleStart);
    recognition.addEventListener("nomatch", handleRawEvent);
    recognition.addEventListener("audioend", handleRawEvent);
    recognition.addEventListener("audiostart", handleRawEvent);
    recognition.addEventListener("end", handleRawEvent);
    recognition.addEventListener("error", handleRawEvent);
    recognition.addEventListener("result", handleRawEvent);
    recognition.addEventListener("soundend", handleRawEvent);
    recognition.addEventListener("soundstart", handleRawEvent);
    recognition.addEventListener("speechend", handleRawEvent);
    recognition.addEventListener("speechstart", handleRawEvent);
    recognition.addEventListener("start", handleRawEvent);
    // Copy `extra` props onto the instance, guarding against prototype pollution.
    const { current: extra2 } = extraRef;
    extra2 && Object.entries(extra2).forEach(([key, value]) => {
      if (key !== "constructor" && key !== "prototype" && key !== "__proto__") {
        recognition[key] = value;
      }
    });
    recognition.start();
    return () => {
      if (recognitionAbortable(recognition)) {
        recognition.abort();
      } else if (!unmountedRef.current) {
        console.warn("react-dictate-button: Cannot stop because SpeechRecognition does not have abort() function.");
      }
    };
  }, [
    continuousRef,
    emitEnd,
    extraRef,
    grammarRef,
    handleAudioEnd,
    handleAudioStart,
    handleEnd,
    handleError,
    handleRawEvent,
    handleResult,
    handleStart,
    langRef,
    notAllowedRef,
    recognitionRef,
    speechGrammarListRef,
    speechRecognitionClassRef,
    started,
    stateRef
  ]);
  // Flag unmount so in-flight recognition events stop emitting to callers.
  (0, import_react3.useEffect)(
    () => () => {
      unmountedRef.current = true;
    },
    []
  );
  // Abort is only offered while audio is actively being captured (readyState 2).
  const abortable = recognitionAbortable(recognitionRef.current) && readyState === 2;
  const supported = !!speechRecognition && !notAllowedRef.current;
  const context = (0, import_react3.useMemo)(
    () => Object.freeze({
      abortable,
      readyState,
      supported
    }),
    [abortable, readyState, supported]
  );
  return /* @__PURE__ */ import_react3.default.createElement(Context_default.Provider, { value: context }, /* @__PURE__ */ import_react3.default.createElement(Context_default.Consumer, null, (context2) => typeof children === "function" ? children(context2) : children));
};
var Composer_default = Composer;
// src/DictateButton.tsx
var import_react5 = __toESM(require("react"));
var import_use_ref_from2 = require("use-ref-from");
// src/hooks/internal/useDictateContext.ts
var import_react4 = require("react");
// Reads the nearest <Composer> context; outside a <Composer> this yields the
// frozen default { abortable: false, readyState: 0, supported: true }.
function useDictateContext() {
  const dictateContext = (0, import_react4.useContext)(Context_default);
  return dictateContext;
}
// src/hooks/useReadyState.ts
// Hook: current dictation readyState, wrapped in a frozen one-element array.
function useReadyState() {
  const context = useDictateContext();
  return Object.freeze([context.readyState]);
}
// src/hooks/useSupported.ts
// Hook: whether speech recognition is supported (and not permission-denied),
// wrapped in a frozen one-element array.
function useSupported() {
  const context = useDictateContext();
  return Object.freeze([context.supported]);
}
// src/DictateButton.tsx
// Presentational <button> wired to the dictation context. Disabled while the
// engine transitions (readyState 1 or 3), when unsupported, or per prop.
var DictateButtonCore = ({ children, className, disabled, onClick }) => {
  const [readyState] = useReadyState();
  const [supported] = useSupported();
  const shouldDisable = readyState === 1 || readyState === 3 || !supported || disabled;
  // Render-prop children receive the current readyState.
  const content = typeof children === "function" ? children({ readyState }) : children;
  return /* @__PURE__ */ import_react5.default.createElement(
    "button",
    {
      className,
      disabled: shouldDisable,
      onClick,
      type: "button"
    },
    content
  );
};
// Button-flavored dictation control: each click toggles recognition on/off.
// Caller callbacks are mirrored into refs so the memoized handlers stay stable.
var DictateButton = ({
  children,
  className,
  continuous,
  disabled,
  extra,
  grammar,
  lang,
  onClick,
  onDictate,
  onEnd,
  onError,
  onProgress,
  onRawEvent,
  onStart,
  speechGrammarList,
  speechRecognition
}) => {
  const [started, setStarted] = (0, import_react5.useState)(false);
  const onClickRef = (0, import_use_ref_from2.useRefFrom)(onClick);
  const onEndRef = (0, import_use_ref_from2.useRefFrom)(onEnd);
  const onErrorRef = (0, import_use_ref_from2.useRefFrom)(onError);
  const onStartRef = (0, import_use_ref_from2.useRefFrom)(onStart);
  // Toggle dictation unless the caller's onClick prevented the default.
  const handleClick = (0, import_react5.useCallback)(
    (event) => {
      onClickRef.current && onClickRef.current(event);
      if (!event.isDefaultPrevented()) {
        setStarted((wasStarted) => !wasStarted);
      }
    },
    [onClickRef, setStarted]
  );
  // Recognition finished (or failed): flip the toggle off, then notify.
  const handleEnd = (0, import_react5.useCallback)(
    (event) => {
      setStarted(false);
      onEndRef.current?.(event);
    },
    [onEndRef, setStarted]
  );
  const handleError = (0, import_react5.useCallback)(
    (event) => {
      setStarted(false);
      onErrorRef.current?.(event);
    },
    [onErrorRef, setStarted]
  );
  const handleStart = (0, import_react5.useCallback)(
    (event) => {
      setStarted(true);
      onStartRef.current?.(event);
    },
    [onStartRef, setStarted]
  );
  return /* @__PURE__ */ import_react5.default.createElement(
    Composer_default,
    {
      continuous,
      extra,
      grammar,
      lang,
      onDictate,
      onEnd: handleEnd,
      onError: handleError,
      onProgress,
      onRawEvent,
      onStart: handleStart,
      speechGrammarList,
      speechRecognition,
      // Disabling the button mid-session force-stops recognition.
      started: started && !disabled
    },
    /* @__PURE__ */ import_react5.default.createElement(
      DictateButtonCore,
      { className, disabled, onClick: handleClick },
      children
    )
  );
};
var DictateButton_default = DictateButton;
// src/DictateCheckbox.tsx
var import_react6 = __toESM(require("react"));
var import_use_ref_from3 = require("use-ref-from");
// Presentational checkbox (wrapped in a <label>) wired to the dictation
// context. Disabled while the engine transitions (readyState 1 or 3), when
// unsupported, or per prop.
var DictateCheckboxCore = ({ children, className, disabled, onChange, started }) => {
  const [readyState] = useReadyState();
  const [supported] = useSupported();
  const shouldDisable = readyState === 1 || readyState === 3 || !supported || disabled;
  // Render-prop children receive the current readyState.
  const content = typeof children === "function" ? children({ readyState }) : children;
  return /* @__PURE__ */ import_react6.default.createElement(
    "label",
    null,
    /* @__PURE__ */ import_react6.default.createElement(
      "input",
      {
        checked: started,
        className,
        disabled: shouldDisable,
        onChange,
        type: "checkbox"
      }
    ),
    content
  );
};
// Checkbox-flavored dictation control: checking starts recognition, unchecking
// (or an end/error from the engine) stops it.
var DictateCheckbox = ({
  children,
  className,
  continuous,
  disabled,
  extra,
  grammar,
  lang,
  onDictate,
  onEnd,
  onError,
  onProgress,
  onRawEvent,
  onStart,
  speechGrammarList,
  speechRecognition
}) => {
  const [started, setStarted] = (0, import_react6.useState)(false);
  const onEndRef = (0, import_use_ref_from3.useRefFrom)(onEnd);
  const onErrorRef = (0, import_use_ref_from3.useRefFrom)(onError);
  const onStartRef = (0, import_use_ref_from3.useRefFrom)(onStart);
  // Mirror the checkbox state into `started`, which drives <Composer>.
  const handleChange = (0, import_react6.useCallback)(
    (event) => setStarted(event.currentTarget.checked),
    [setStarted]
  );
  // Recognition finished (or failed): uncheck first, then notify the caller.
  const handleEnd = (0, import_react6.useCallback)(
    (event) => {
      setStarted(false);
      onEndRef.current?.(event);
    },
    [onEndRef, setStarted]
  );
  const handleError = (0, import_react6.useCallback)(
    (event) => {
      setStarted(false);
      onErrorRef.current?.(event);
    },
    [onErrorRef, setStarted]
  );
  const handleStart = (0, import_react6.useCallback)(
    (event) => {
      setStarted(true);
      onStartRef.current?.(event);
    },
    [onStartRef, setStarted]
  );
  return /* @__PURE__ */ import_react6.default.createElement(
    Composer_default,
    {
      continuous,
      extra,
      grammar,
      lang,
      onDictate,
      onEnd: handleEnd,
      onError: handleError,
      onProgress,
      onRawEvent,
      onStart: handleStart,
      speechGrammarList,
      speechRecognition,
      // Disabling the checkbox mid-session force-stops recognition.
      started: started && !disabled
    },
    /* @__PURE__ */ import_react6.default.createElement(
      DictateCheckboxCore,
      { className, disabled, onChange: handleChange, started },
      children
    )
  );
};
var DictateCheckbox_default = DictateCheckbox;
// src/hooks/useAbortable.ts
// Hook: whether the active recognition session can be aborted, wrapped in a
// frozen one-element array.
function useAbortable() {
  const context = useDictateContext();
  return Object.freeze([context.abortable]);
}
// src/index.ts
// Aliases backing the export-map getters declared near the top of the file:
// `Context` resolves via Context2 and the default export is <DictateButton>.
var Context2 = Context_default;
var DictateButton_ = DictateButton_default;
var src_default = DictateButton_;
// Annotate the CommonJS export names for ESM import in node:
// (The `0 &&` guard makes this dead code at runtime; Node's static lexer reads
// it to expose named ESM imports from this CommonJS bundle.)
0 && (module.exports = {
  Composer,
  Context,
  DictateButton,
  DictateCheckbox,
  useAbortable,
  useReadyState,
  useSupported
});
//# sourceMappingURL=react-dictate-button.js.map