
watson-speech

Version: 0.40.0

IBM Watson Speech to Text and Text to Speech SDK for web browsers.
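This file is the pre-built browser bundle: the UMD wrapper below registers a global `WatsonSpeech` object when the script is loaded via a `<script>` tag. Per the embedded JSDoc, every helper requires either `token` (Cloud Foundry) or `accessToken` (IAM) for auth. A minimal sketch of live microphone transcription follows; the unpkg `dist/watson-speech.js` path, the `/api/token` endpoint, and the `#record`/`#stop`/`#transcript` elements are assumptions for illustration (credentials should never be embedded in the page):

```html
<script src="https://unpkg.com/watson-speech@0.40.0/dist/watson-speech.js"></script>
<script>
  // Hypothetical server-side endpoint that exchanges service credentials
  // for a short-lived IAM access token.
  var accessToken;
  fetch('/api/token').then(function (response) {
    return response.text();
  }).then(function (token) {
    accessToken = token;
  });

  var stream;
  document.getElementById('record').onclick = function () {
    // Calling recognizeMicrophone() directly inside a click handler lets the
    // SDK create its MicrophoneStream synchronously, which it needs in order
    // to resume the AudioContext on iOS Safari.
    stream = WatsonSpeech.SpeechToText.recognizeMicrophone({
      accessToken: accessToken,
      objectMode: false // emit formatted transcript strings in 'data' events
    });
    stream.on('data', function (text) {
      document.getElementById('transcript').textContent += text;
    });
    stream.on('error', function (err) { console.error(err); });
  };
  // stop() closes the microphone and the underlying WebSocket.
  document.getElementById('stop').onclick = function () {
    if (stream) { stream.stop(); }
  };
</script>
```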
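`recognizeFile()` accepts a `File`/`Blob` (e.g. from a file `<input>`) or a URL string, and `play: true` plays the audio locally while it is transcribed. A sketch reusing the `accessToken` variable from the previous example; the `#audio-input` element is again hypothetical:

```js
document.getElementById('audio-input').addEventListener('change', function () {
  var stream = WatsonSpeech.SpeechToText.recognizeFile({
    accessToken: accessToken, // obtained as in the previous sketch
    file: this.files[0],
    play: true // play the audio locally while it is being transcribed
  });
  stream.on('data', function (text) { console.log(text); });
  stream.on('error', function (err) { console.error(err); });
});
```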
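`getModels()` (documented in the JSDoc embedded below) returns a promise that resolves to the available base models, each with `url`, `rate`, `name`, `language`, and `description` fields:

```js
WatsonSpeech.SpeechToText.getModels({ accessToken: accessToken })
  .then(function (models) {
    models.forEach(function (m) {
      console.log(m.name + ' (' + m.language + ', ' + m.rate + ' Hz): ' + m.description);
    });
  })
  .catch(function (err) { console.error(err); });
```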
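The `TextToSpeech` half of the bundle exports `synthesize()` and `getVoices()`. The body of the synthesize module is not part of this excerpt, so the option names shown here (`text` plus the same auth fields as the STT helpers) are assumptions based on the package's documented usage:

```js
// Assumed signature; plays the synthesized audio immediately.
WatsonSpeech.TextToSpeech.synthesize({
  text: 'Hello from Watson.', // assumed option name, per the package docs
  accessToken: accessToken
});
```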

// IBM Watson Speech JavaScript SDK // 0.40.0 // Generated at Tue Oct 25 14:47:11 CDT 2022 // Copyright IBM () // !function(e,n){"object"==typeof exports&&"object"==typeof module?module.exports=n():"function"==typeof define&&define.amd?define([],n):"object"==typeof exports?exports.WatsonSpeech=n():e.WatsonSpeech=n()}(self,function(){return(()=>{var __webpack_modules__={"./index.js":(__unused_webpack_module,exports,__webpack_require__)=>{"use strict";eval('/**\n * Copyright 2015 IBM Corp. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the "License");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an "AS IS" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n/**\n * IBM Watson Speech JavaScript SDK\n *\n * Top-level module includes the version, and both of the speech libraries.\n *\n * If using a bundler such as browserify, you may optionally include sub-modules directly to reduce the size of the final bundle\n *\n * @module watson-speech\n */\n\n/**\n * Release version\n *\n * (for pre-built bundles only - if using via npm, read the package.json to determine the version)\n *\n * (This previously did `require(\'package.json\').version` which works in browserify but causes webpack to choke with confusing errors unless extra plugins are included)\n *\n * envify automatically rewrites this during the release process\n */\n\nexports.version = "0.40.0";\n/**\n *\n * @see module:watson-speech/speech-to-text\n */\n\nexports.SpeechToText = __webpack_require__(/*! ./speech-to-text */ "./speech-to-text/index.js");\n/**\n *\n * @see module:watson-speech/text-to-speech\n */\n\nexports.TextToSpeech = __webpack_require__(/*! ./text-to-speech */ "./text-to-speech/index.js");\n\n//# sourceURL=webpack://WatsonSpeech/./index.js?')},"./speech-to-text/index.js":(module,__unused_webpack_exports,__webpack_require__)=>{"use strict";eval('/* provided dependency */ var Buffer = __webpack_require__(/*! buffer */ "./node_modules/buffer/index.js")["Buffer"];\n\n/**\n * IBM Watson Speech to Text JavaScript SDK\n *\n * The primary methods for interacting with the Speech to Text JS SDK are:\n * * `recognizeMicrophone()` for live microphone input\n * * `recognizeFile()` for file `<input>`\'s and other data sources (e.g. a Blob loaded via AJAX)\n *\n * However, the underlying streams and utils that they use are also exposed for advanced usage.\n *\n * @module watson-speech/speech-to-text\n */\n\nmodule.exports = {\n // "easy-mode" API\n\n /**\n * @see module:watson-speech/speech-to-text/recognize-microphone\n */\n recognizeMicrophone: __webpack_require__(/*! ./recognize-microphone */ "./speech-to-text/recognize-microphone.js"),\n\n /**\n * @see module:watson-speech/speech-to-text/recognize-blob\n */\n recognizeFile: __webpack_require__(/*! ./recognize-file */ "./speech-to-text/recognize-file.js"),\n\n /**\n * @see module:watson-speech/speech-to-text/get-models\n */\n getModels: __webpack_require__(/*! ./get-models */ "./speech-to-text/get-models.js"),\n // individual components to build more customized solutions\n\n /**\n * @see WebAudioL16Stream\n */\n WebAudioL16Stream: __webpack_require__(/*! 
./webaudio-l16-stream */ "./speech-to-text/webaudio-l16-stream.js"),\n\n /**\n * @see RecognizeStream\n */\n RecognizeStream: __webpack_require__(/*! ./recognize-stream */ "./speech-to-text/recognize-stream.js"),\n\n /**\n * @see FilePlayer\n */\n FilePlayer: __webpack_require__(/*! ./file-player */ "./speech-to-text/file-player.js"),\n\n /**\n * @see FormatStream\n */\n FormatStream: __webpack_require__(/*! ./format-stream */ "./speech-to-text/format-stream.js"),\n\n /**\n * @see TimingStream\n */\n TimingStream: __webpack_require__(/*! ./timing-stream */ "./speech-to-text/timing-stream.js"),\n\n /**\n * @see ResultStream\n */\n ResultStream: __webpack_require__(/*! ./result-stream */ "./speech-to-text/result-stream.js"),\n\n /**\n * @see SpeakerStream\n */\n SpeakerStream: __webpack_require__(/*! ./speaker-stream */ "./speech-to-text/speaker-stream.js"),\n\n /**\n * @see WritableElementStream\n */\n WritableElementStream: __webpack_require__(/*! ./writable-element-stream */ "./speech-to-text/writable-element-stream.js"),\n // external components exposed for convenience\n\n /**\n * @see https://www.npmjs.com/package/get-user-media-promise\n */\n getUserMedia: __webpack_require__(/*! get-user-media-promise */ "./node_modules/get-user-media-promise/lib/get-user-media-promise.js"),\n\n /**\n * @see https://www.npmjs.com/package/microphone-stream\n */\n MicrophoneStream: __webpack_require__(/*! microphone-stream */ "./node_modules/microphone-stream/microphone-stream.js"),\n\n /**\n * @see https://nodejs.org/api/buffer.html\n */\n Buffer: Buffer\n};\n\n//# sourceURL=webpack://WatsonSpeech/./speech-to-text/index.js?')},"./text-to-speech/index.js":(__unused_webpack_module,exports,__webpack_require__)=>{"use strict";eval('/**\n * Copyright 2015 IBM Corp. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the "License");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an "AS IS" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n/**\n * @module watson-speech/text-to-speech\n */\n\n/**\n * @see module:watson-speech/text-to-speech/synthesize\n */\n\nexports.synthesize = __webpack_require__(/*! ./synthesize */ "./text-to-speech/synthesize.js");\n/**\n * @see module:watson-speech/text-to-speech/get-voices\n */\n\nexports.getVoices = __webpack_require__(/*! ./get-voices */ "./text-to-speech/get-voices.js");\n\n//# sourceURL=webpack://WatsonSpeech/./text-to-speech/index.js?')},"./speech-to-text/content-type.js":(__unused_webpack_module,exports,__webpack_require__)=>{"use strict";eval("\n\n__webpack_require__(/*! core-js/modules/es.regexp.to-string.js */ \"./node_modules/core-js/modules/es.regexp.to-string.js\");\n\nvar extname = __webpack_require__(/*! 
path */ \"./node_modules/path/path.js\").extname; // This module attempts to identify common content-types based on the filename or header.\n// It is not exhaustive, and for best results, you should always manually specify the content-type option.\n// See the complete list of supported content-types at\n// https://console.bluemix.net/docs/services/speech-to-text/input.html#formats\n// *some* file types can be identified by the first 3-4 bytes of the file\n\n\nvar headerContentTypes = {\n fLaC: 'audio/flac',\n RIFF: 'audio/wav',\n OggS: 'audio/ogg',\n ID3: 'audio/mp3',\n '\\u001aEߣ': 'audio/webm' // String for first four hex's of webm: [1A][45][DF][A3] (https://www.matroska.org/technical/specs/index.html#EBML)\n\n};\n/**\n * Takes the beginning of an audio file and returns the associated content-type / mime type\n *\n * @param {Buffer} buffer with at least the first 4 bytes of the file\n * @return {String|undefined} - the contentType of undefined\n */\n\nexports.fromHeader = function contentTypeFromHeader(buffer) {\n var headerStr = buffer.slice(0, 4).toString().substr(0, 4); // mp3's are only consistent for the first 3 characters\n\n return headerContentTypes[headerStr] || headerContentTypes[headerStr.substr(0, 3)];\n};\n\nvar filenameContentTypes = {\n '.mp3': 'audio/mp3',\n '.wav': 'audio/wav',\n '.flac': 'audio/flac',\n '.ogg': 'audio/ogg',\n '.oga': 'audio/ogg',\n '.opus': 'audio/ogg; codec=opus',\n '.webm': 'audio/webm'\n};\n/**\n * Guess the content type from the filename\n *\n * Note: Blob and File objects include a .type property, but we're ignoring it because it's frequently either\n * incorrect (e.g. video/ogg instead of audio/ogg) or else a different format than what's expected (e.g. audio/x-wav)\n *\n * @param {String|Blob|File} file - string filename or url, or binary File/Blob object\n * @return {String|undefined}\n */\n\nexports.fromFilename = function contentTypeFromFilename(file) {\n // todo: consider retrying with querystring & hash stripped from URLs\n var ext = extname(typeof file === 'string' && file || file.name || '');\n return filenameContentTypes[ext];\n};\n\n//# sourceURL=webpack://WatsonSpeech/./speech-to-text/content-type.js?")},"./speech-to-text/file-player.js":(module,__unused_webpack_exports,__webpack_require__)=>{"use strict";eval('\n\n__webpack_require__(/*! core-js/modules/es.promise.js */ "./node_modules/core-js/modules/es.promise.js");\n\n__webpack_require__(/*! core-js/modules/web.dom-collections.iterator.js */ "./node_modules/core-js/modules/web.dom-collections.iterator.js");\n\n__webpack_require__(/*! core-js/modules/web.url.js */ "./node_modules/core-js/modules/web.url.js");\n\nvar contentType = __webpack_require__(/*! ./content-type */ "./speech-to-text/content-type.js");\n\nvar bufferFrom = __webpack_require__(/*! buffer-from */ "./node_modules/buffer-from/index.js");\n/**\n * Plays audio from a URL\n * Compatible with Mobile Safari if triggered in direct response to a user interaction (e.g. 
click)\n * @param {String} url\n * @constructor\n */\n\n\nfunction UrlPlayer(url) {\n var audio = this.audio = new Audio();\n audio.src = url;\n audio.play();\n /**\n * Stops the audio\n */\n\n this.stop = function stop() {\n audio.pause();\n audio.currentTime = 0;\n };\n}\n/**\n * Plays audio from File/Blob instances\n * @param {File|Blob} file\n * @param {String} contentType\n * @constructor\n */\n\n\nfunction FilePlayer(file, contentType) {\n var audio = this.audio = new Audio();\n\n if (audio.canPlayType(contentType)) {\n audio.src = URL.createObjectURL(new Blob([file], {\n type: contentType\n }));\n audio.play();\n } else {\n // if we emit an error, it prevents the promise from returning the actual result\n // however, most browsers do not support flac, so this is a reasonably scenario\n var err = new Error(\'Current browser is unable to play back \' + contentType);\n err.name = FilePlayer.ERROR_UNSUPPORTED_FORMAT;\n err.contentType = contentType;\n throw err;\n }\n /**\n * Stops the audio\n */\n\n\n this.stop = function stop() {\n audio.pause();\n audio.currentTime = 0;\n };\n}\n\nFilePlayer.ERROR_UNSUPPORTED_FORMAT = \'UNSUPPORTED_FORMAT\';\n/**\n * Reads the first few bytes of a binary file and resolves to the content-type if recognized & supported\n * @param {File|Blob} file\n * @return {Promise}\n */\n\nfunction getContentTypeFromFile(file) {\n return new Promise(function (resolve, reject) {\n var blobToText = new Blob([file]).slice(0, 4);\n var r = new FileReader();\n r.readAsText(blobToText);\n\n r.onload = function () {\n var ct = contentType.fromHeader(bufferFrom(r.result));\n\n if (ct) {\n resolve(ct);\n } else {\n var err = new Error(\'Unable to determine content type from file header. Supported file types: wav, mp3, flac, ogg, and webm.\');\n err.name = FilePlayer.ERROR_UNSUPPORTED_FORMAT;\n reject(err);\n }\n };\n });\n}\n/**\n * Determines the file\'s content-type and then resolves to a FilePlayer instance\n * @param {File|Blob|String} file - binary data or URL of audio file (binary data playback may not work on mobile Safari)\n * @param {String} [contentType] - optional content-type, will be sniffed from file header if unspecified\n * @return {Promise.<FilePlayer>}\n */\n\n\nfunction playFile(file, contentType) {\n if (typeof file === \'string\') {\n return Promise.resolve(new UrlPlayer(file));\n }\n\n if (contentType) {\n return Promise.resolve(new FilePlayer(file, contentType));\n }\n\n return getContentTypeFromFile(file).then(function (sniffedContentType) {\n return new FilePlayer(file, sniffedContentType);\n });\n}\n\nmodule.exports = FilePlayer;\nmodule.exports.getContentType = getContentTypeFromFile;\nmodule.exports.playFile = playFile;\n\n//# sourceURL=webpack://WatsonSpeech/./speech-to-text/file-player.js?')},"./speech-to-text/format-stream.js":(module,__unused_webpack_exports,__webpack_require__)=>{"use strict";eval("\n\n__webpack_require__(/*! core-js/modules/es.regexp.to-string.js */ \"./node_modules/core-js/modules/es.regexp.to-string.js\");\n\n__webpack_require__(/*! core-js/modules/es.string.replace.js */ \"./node_modules/core-js/modules/es.string.replace.js\");\n\n__webpack_require__(/*! core-js/modules/es.string.trim.js */ \"./node_modules/core-js/modules/es.string.trim.js\");\n\nvar {\n Transform\n} = __webpack_require__(/*! readable-stream */ \"./node_modules/readable-stream/readable-browser.js\");\n\nvar util = __webpack_require__(/*! util */ \"./node_modules/util/util.js\");\n\nvar clone = __webpack_require__(/*! 
clone */ \"./node_modules/clone/clone.js\");\n\nvar defaults = __webpack_require__(/*! defaults */ \"./node_modules/defaults/index.js\");\n/**\n * Applies some basic formatting to transcriptions:\n * - Capitalize the first word of each sentence\n * - Add a period to the end\n * - Fix any \"cruft\" in the transcription\n * - etc.\n *\n * May be used as either a Stream, or a standalone helper.\n *\n * @param {Object} opts\n * @param {String} [opts.model] - some models / languages need special handling\n * @param {String} [opts.hesitation=''] - what to put down for a \"hesitation\" event, also consider \\u2026 (ellipsis: ...)\n * @param {Boolean} [options.objectMode=false] - emit `result` objects instead of string Buffers for the `data` events.\n * @constructor\n */\n\n\nfunction FormatStream(opts) {\n this.options = defaults(opts, {\n model: '',\n // some models should have all spaces removed\n hesitation: '',\n decodeStrings: false // false = don't convert strings to buffers before passing to _write\n\n });\n Transform.call(this, this.options);\n this.isJaCn = this.options.model.substring(0, 5) === 'ja-JP' || this.options.model.substring(0, 5) === 'zh-CN';\n this._transform = this.options.objectMode ? this.transformObject : this.transformString;\n}\n\nutil.inherits(FormatStream, Transform);\nvar reHesitation = /%HESITATION ?/g; // https://console.bluemix.net/docs/services/speech-to-text/output.html#output - D_ is handled below\n\nvar reRepeatedCharacter = /([a-z])\\1{2,}/gi; // detect the same character repeated three or more times and remove it\n\nvar reDUnderscoreWords = /D_[^\\s]+/g; // replace D_(anything)\n\n/**\n * Formats one or more words, removing special symbols, junk, and spacing for some languages\n * @param {String} text\n * @param {Boolean} isFinal\n * @return {String}\n */\n\nFormatStream.prototype.clean = function clean(text) {\n // clean out \"junk\"\n text = text.replace(reHesitation, this.options.hesitation ? this.options.hesitation.trim() + ' ' : this.options.hesitation).replace(reRepeatedCharacter, '').replace(reDUnderscoreWords, ''); // remove spaces for Japanese and Chinese\n\n if (this.isJaCn) {\n text = text.replace(/ /g, '');\n }\n\n return text.trim() + ' '; // we want exactly 1 space at the end\n};\n/**\n * Capitalizes the first word of a sentence\n * @param {String} text\n * @return {string}\n */\n\n\nFormatStream.prototype.capitalize = function capitalize(text) {\n // capitalize first word, returns '' in the case of an empty word\n return text.charAt(0).toUpperCase() + text.substring(1);\n};\n/**\n * Puts a period on the end of a sentence\n * @param {String} text\n * @return {string}\n */\n\n\nFormatStream.prototype.period = function period(text) {\n text = text.trim(); // don't put a period down if the clean stage remove all of the text\n\n if (!text) {\n return ' ';\n } // just add a space if the sentence ends in an ellipse\n\n\n if (text.substr(-1) === '\\u2026') {\n return text + ' ';\n }\n\n return text + (this.isJaCn ? '。' : '. 
');\n};\n\nFormatStream.prototype.transformString = function (chunk, encoding, next) {\n this.push(this.formatString(chunk.toString()));\n next();\n};\n\nFormatStream.prototype.transformObject = function formatResult(result, encoding, next) {\n this.push(this.formatResult(result));\n next();\n};\n/**\n * Formats a single string result.\n *\n * May be used outside of Node.js streams\n *\n * @param {String} str - text to format\n * @param {bool} [isInterim=false] - set to true to prevent adding a period to the end of the sentence\n * @return {String}\n */\n\n\nFormatStream.prototype.formatString = function (str, isInterim) {\n str = this.capitalize(this.clean(str));\n return isInterim ? str : this.period(str);\n};\n/**\n * Creates a new result with all transcriptions formatted\n *\n * May be used outside of Node.js streams\n *\n * @param {Object} data\n * @return {Object}\n */\n\n\nFormatStream.prototype.formatResult = function formatResult(data) {\n data = clone(data);\n\n if (Array.isArray(data.results)) {\n data.results.forEach(function (result, i) {\n // if there are multiple interim results (as produced by the speaker stream),\n // treat the text as final in all but the last result\n var textFinal = result.final || i !== data.results.length - 1;\n result.alternatives = result.alternatives.map(function (alt) {\n alt.transcript = this.formatString(alt.transcript, !textFinal);\n\n if (alt.timestamps) {\n alt.timestamps = alt.timestamps.map(function (ts, j, arr) {\n // timestamps is an array of arrays, each sub-array is in the form [\"word\", startTime, endTime]'\n ts[0] = this.clean(ts[0]);\n\n if (j === 0) {\n ts[0] = this.capitalize(ts[0]);\n }\n\n if (j === arr.length - 1 && textFinal) {\n ts[0] = this.period(ts[0]);\n }\n\n return ts;\n }, this).filter(function (ts) {\n return ts[0]; // remove any timestamps without a word (due to cleaning out junk words)\n });\n }\n\n return alt;\n }, this);\n }, this);\n }\n\n return data;\n};\n\nFormatStream.prototype.promise = __webpack_require__(/*! ./to-promise */ \"./speech-to-text/to-promise.js\");\nmodule.exports = FormatStream;\n\n//# sourceURL=webpack://WatsonSpeech/./speech-to-text/format-stream.js?")},"./speech-to-text/get-models.js":(module,__unused_webpack_exports,__webpack_require__)=>{"use strict";eval('/**\n * Copyright 2015 IBM Corp. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the "License");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an "AS IS" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n/**\n * @module watson-speech/speech-to-text/get-models\n */\n\n/**\n Returns a promise that resolves to an array of objects representing the available voice models. 
Example:\n\n ```js\n [{\n "url": "https://api.us-south.speech-to-text.watson.cloud.ibm.com/v1/models/en-UK_BroadbandModel",\n "rate": 16000,\n "name": "en-UK_BroadbandModel",\n "language": "en-UK",\n "description": "UK English broadband model."\n },\n //...\n ]\n ```\n Requires fetch, pollyfill available at https://github.com/github/fetch\n\n * @todo define format in @return statement\n * @param {Object} options\n * @param {String} options.url=https://api.us-south.speech-to-text.watson.cloud.ibm.com URL for Watson Speech to Text API\n * @param {String} options.token auth token for CF services\n * @param {String} options.accessToken IAM access token for RC services\n * @return {Promise<T>}\n */\n\n__webpack_require__(/*! core-js/modules/es.promise.js */ "./node_modules/core-js/modules/es.promise.js");\n\nmodule.exports = function getModels(options) {\n if (!options || !options.token && !options.accessToken) {\n throw new Error(\'Watson SpeechToText: missing required auth parameter: options.token (CF) or options.accessToken (RC)\');\n }\n\n var reqOpts = {\n credentials: \'omit\',\n headers: {\n accept: \'application/json\'\n }\n };\n var url = options.url || \'https://api.us-south.speech-to-text.watson.cloud.ibm.com\';\n\n if (options.accessToken) {\n url = url + \'/v1/models?access_token=\' + options.accessToken;\n } else {\n url = url + \'/v1/models?watson-token=\' + options.token;\n }\n\n return fetch(url, reqOpts).then(function (response) {\n return response.json();\n }).then(function (obj) {\n return obj.models;\n });\n};\n\n//# sourceURL=webpack://WatsonSpeech/./speech-to-text/get-models.js?')},"./speech-to-text/no-timestamps.js":(module,__unused_webpack_exports,__webpack_require__)=>{"use strict";eval("\n/**\n * Returns true if the result is missing it's timestamps\n * @param {Object} data\n * @return {Boolean}\n */\n\n__webpack_require__(/*! core-js/modules/es.string.trim.js */ \"./node_modules/core-js/modules/es.string.trim.js\");\n\nmodule.exports = function noTimestamps(data) {\n return data.results.some(function (result) {\n var alt = result.alternatives && result.alternatives[0];\n return !!(alt && (alt.transcript.trim() && !alt.timestamps || !alt.timestamps.length));\n });\n};\n\nmodule.exports.ERROR_NO_TIMESTAMPS = 'NO_TIMESTAMPS';\n\n//# sourceURL=webpack://WatsonSpeech/./speech-to-text/no-timestamps.js?")},"./speech-to-text/recognize-file.js":(module,__unused_webpack_exports,__webpack_require__)=>{"use strict";eval("/**\n * Copyright 2015 IBM Corp. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n__webpack_require__(/*! core-js/modules/es.object.assign.js */ \"./node_modules/core-js/modules/es.object.assign.js\");\n\nvar BlobStream = __webpack_require__(/*! readable-blob-stream */ \"./node_modules/readable-blob-stream/index.js\");\n\nvar RecognizeStream = __webpack_require__(/*! ./recognize-stream.js */ \"./speech-to-text/recognize-stream.js\");\n\nvar FilePlayer = __webpack_require__(/*! 
./file-player.js */ \"./speech-to-text/file-player.js\");\n\nvar FormatStream = __webpack_require__(/*! ./format-stream.js */ \"./speech-to-text/format-stream.js\");\n\nvar TimingStream = __webpack_require__(/*! ./timing-stream.js */ \"./speech-to-text/timing-stream.js\");\n\nvar WritableElementStream = __webpack_require__(/*! ./writable-element-stream */ \"./speech-to-text/writable-element-stream.js\");\n\nvar ResultStream = __webpack_require__(/*! ./result-stream */ \"./speech-to-text/result-stream.js\");\n\nvar SpeakerStream = __webpack_require__(/*! ./speaker-stream */ \"./speech-to-text/speaker-stream.js\");\n\nvar contentType = __webpack_require__(/*! ./content-type */ \"./speech-to-text/content-type.js\");\n\nvar fetch = __webpack_require__(/*! nodeify-fetch */ \"./node_modules/nodeify-fetch/browser.js\"); // like regular fetch, but with an extra method on the response to get a node-style ReadableStream\n\n/**\n * @module watson-speech/speech-to-text/recognize-file\n */\n\n/**\n * Create and return a RecognizeStream from a File or Blob\n * (e.g. from a file <input>, a dragdrop target, or an ajax request)\n *\n * @param {Object} options - Also passed to {MediaElementAudioStream} and to {RecognizeStream}\n * @param {String} [options.url='wss://api.us-south.speech-to-text.watson.cloud.ibm.com'] - Base URL for a service instance\n * @param {String} options.token - Auth Token for CF services - see https://github.com/watson-developer-cloud/node-sdk#authorization\n * @param {String} options.accessToken - IAM Access Token for RC services - see https://github.com/watson-developer-cloud/node-sdk#authorization\n * @param {Blob|FileString} options.file - String url or the raw audio data as a Blob or File instance to be transcribed (and optionally played). Playback may not with with Blob or File on mobile Safari.\n * @param {Boolean} [options.play=false] - If a file is set, play it locally as it's being uploaded\n * @param {Boolena} [options.format=true] - pipe the text through a {FormatStream} which performs light formatting. Also controls smart_formatting option unless explicitly set.\n * @param {Boolena} [options.realtime=options.play] - pipe the text through a {TimingStream} which slows the output down to real-time to match the audio playback.\n * @param {String|DOMElement} [options.outputElement] pipe the text to a WriteableElementStream targeting the specified element. Also defaults objectMode to true to enable interim results.\n * @param {Boolean} [options.extractResults=false] pipe results through a ResultExtractor stream to simplify the objects. (Default behavior before v0.22) Automatically enables objectMode.\n * @param {Boolean} [options.resultsBySpeaker=false] pipe results through a SpeakerStream. Causes each data event to include multiple results, each with a speaker field. Automatically enables objectMode and speaker_labels. Adds some delay to processing.\n *\n * @return {RecognizeStream|SpeakerStream|FormatStream|ResultStream|TimingStream}\n */\n\n\nmodule.exports = function recognizeFile(options) {\n // eslint-disable-line complexity\n if (!options || !options.token && !options.accessToken) {\n throw new Error('WatsonSpeechToText: missing required parameter: opts.token (CF) or opts.accessToken (RC)');\n }\n\n if (options.data && !options.file) {\n options.file = options.data;\n delete options.data;\n\n if (!options.silent) {\n // eslint-disable-next-line no-console\n console.log(new Error('WatsonSpeechToText recognizeFile(): Warning data option was renamed to file. 
Set silent: true to hide this warning.'));\n }\n } // the WritableElementStream works best in objectMode\n\n\n if (options.outputElement && options.objectMode !== false) {\n options.objectMode = true;\n } // the ResultExtractor only works in objectMode\n\n\n if (options.extractResults) {\n options.objectMode = true;\n } // SpeakerStream requires objectMode and speaker_labels\n\n\n if (options.resultsBySpeaker) {\n options.objectMode = true;\n options.speakerLabels = true;\n } // default format to true (capitals and periods)\n // default smartFormatting to options.format value (dates, currency, etc.)\n\n\n options.format = options.format !== false;\n\n if (typeof options.smartFormatting === 'undefined') {\n options.smartFormatting = options.format;\n }\n\n var realtime = options.realtime || typeof options.realtime === 'undefined' && options.play; // the timing stream requires timestamps to work, so enable them automatically\n\n if (realtime) {\n options.timestamps = true;\n } // Attempt to guess content-type based on filename\n // If this fails, recognizeStream will make a second attempt based on the file header\n\n\n if (!options.contentType) {\n options.contentType = contentType.fromFilename(options.file);\n }\n\n var rsOpts = Object.assign({\n interimResults: true\n }, options);\n var recognizeStream = new RecognizeStream(rsOpts);\n var streams = [recognizeStream]; // collect all of the streams so that we can bundle up errors and send them to the last one\n\n var stream = recognizeStream;\n\n if (typeof options.file === 'string') {\n fetch(options.file).then(function (response) {\n // old behavior https://github.com/bergos/nodeify-fetch/blob/v1.0.1/lib/patch-response.js#L23\n //return response.readable();\n // new behavior https://github.com/bergos/nodeify-fetch/blob/v2.2.2/lib/patchResponse.js\n // seems like we just have to check if the body is readable\n if (response.body.readable) {\n return response.body;\n } else {\n return new Error(\"file is not a readable stream\");\n }\n }).then(function (source) {\n source.pipe(recognizeStream);\n streams.unshift(source);\n }).catch(function (er) {\n recognizeStream.emit('error', er);\n });\n } else {\n var source = new BlobStream(options.file);\n source.pipe(recognizeStream);\n streams.unshift(source);\n } // note: the TimingStream cannot currently handle results as regrouped by the SpeakerStream\n // so it must come first\n\n\n var timingStream;\n\n if (realtime) {\n timingStream = new TimingStream(options);\n stream = stream.pipe(timingStream);\n streams.push(stream);\n stream.on('stop', recognizeStream.stop.bind(recognizeStream));\n } else {\n stream.stop = recognizeStream.stop.bind(recognizeStream);\n }\n\n if (options.resultsBySpeaker) {\n stream = stream.pipe(new SpeakerStream(options));\n streams.push(stream);\n } // note: the format stream should come after the speaker stream to format sentences correctly\n\n\n if (options.format) {\n stream = stream.pipe(new FormatStream(options));\n streams.push(stream);\n }\n\n if (options.play) {\n // when file playback actually begins\n // (mostly important for downloaded files)\n FilePlayer.playFile(options.file, options.contentType).then(function (player) {\n recognizeStream.on('stop', player.stop.bind(player));\n recognizeStream.on('error', player.stop.bind(player)); // for files loaded via URL, restet the start time of the timing stream to when it begins playing\n\n if (timingStream && typeof options.file === 'string') {\n // eslint-disable-next-line func-style\n var fn = function fn() {\n 
timingStream.setStartTime(); // defaults to Date.now()\n\n player.audio.removeEventListener('playing', fn);\n };\n\n player.audio.addEventListener('playing', fn);\n }\n }).catch(function (err) {\n // Node.js automatically unpipes any source stream(s) when an error is emitted (on the assumption that the previous stream's output caused the error.)\n // In this case, we don't want that behavior - a playback error should not stop the transcription\n // So, we have to:\n // 1. find the source streams\n // 2. emit the error (causing the automatic unpipe)\n // 3. re-pipe the source streams\n var sources = streams.filter(function (s) {\n return s._readableState && s._readableState.pipes && (s._readableState.pipes === stream || Array.isArray(s._readableState.pipes) && s._readableState.pipes.indexOf(stream) !== -1);\n });\n stream.emit('error', err);\n sources.forEach(function (s) {\n s.pipe(stream);\n });\n });\n }\n\n if (options.outputElement) {\n // we don't want to return the WES, just send data to it\n streams.push(stream.pipe(new WritableElementStream(options)));\n }\n\n if (options.extractResults) {\n var stop = stream.stop ? stream.stop.bind(stream) : recognizeStream.stop.bind(recognizeStream);\n stream = stream.pipe(new ResultStream());\n stream.stop = stop;\n streams.push(stream);\n } // Capture errors from any stream except the last one and emit them on the last one\n\n\n streams.forEach(function (prevStream) {\n if (prevStream !== stream) {\n prevStream.on('error', stream.emit.bind(stream, 'error'));\n }\n });\n\n if (!stream.stop) {\n if (timingStream) {\n stream.stop = timingStream.stop.bind(timingStream);\n } else {\n stream.stop = recognizeStream.stop.bind(recognizeStream);\n }\n } // expose the original stream to for debugging (and to support the JSON tab on the STT demo)\n\n\n stream.recognizeStream = recognizeStream;\n return stream;\n};\n\n//# sourceURL=webpack://WatsonSpeech/./speech-to-text/recognize-file.js?")},"./speech-to-text/recognize-microphone.js":(module,__unused_webpack_exports,__webpack_require__)=>{"use strict";eval("/**\n * Copyright 2015 IBM Corp. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n__webpack_require__(/*! core-js/modules/es.object.assign.js */ \"./node_modules/core-js/modules/es.object.assign.js\");\n\n__webpack_require__(/*! core-js/modules/es.promise.js */ \"./node_modules/core-js/modules/es.promise.js\");\n\nvar getUserMedia = __webpack_require__(/*! get-user-media-promise */ \"./node_modules/get-user-media-promise/lib/get-user-media-promise.js\");\n\nvar MicrophoneStream = __webpack_require__(/*! microphone-stream */ \"./node_modules/microphone-stream/microphone-stream.js\");\n\nvar RecognizeStream = __webpack_require__(/*! ./recognize-stream.js */ \"./speech-to-text/recognize-stream.js\");\n\nvar L16 = __webpack_require__(/*! ./webaudio-l16-stream.js */ \"./speech-to-text/webaudio-l16-stream.js\");\n\nvar FormatStream = __webpack_require__(/*! 
./format-stream.js */ \"./speech-to-text/format-stream.js\");\n\nvar WritableElementStream = __webpack_require__(/*! ./writable-element-stream */ \"./speech-to-text/writable-element-stream.js\");\n\nvar {\n Writable\n} = __webpack_require__(/*! readable-stream */ \"./node_modules/readable-stream/readable-browser.js\");\n\nvar ResultStream = __webpack_require__(/*! ./result-stream */ \"./speech-to-text/result-stream.js\");\n\nvar SpeakerStream = __webpack_require__(/*! ./speaker-stream */ \"./speech-to-text/speaker-stream.js\");\n\nvar preservedMicStream;\nvar bitBucket = new Writable({\n write: function write(chunk, encoding, callback) {\n // when the keepMicrophone option is enabled, unused audio data is sent here so that it isn't buffered by other streams.\n callback();\n },\n objectMode: true,\n // can still accept strings/buffers\n decodeStrings: false\n});\n/**\n * @module watson-speech/speech-to-text/recognize-microphone\n */\n\n/**\n * Create and return a RecognizeStream sourcing audio from the user's microphone\n *\n * @param {Object} options - Also passed to {RecognizeStream}, and {FormatStream} when applicable\n * @param {String} options.token - Auth Token for CF services - see https://github.com/watson-developer-cloud/node-sdk#authorization\n * @param {String} options.accessToken - IAM Access Token for RC services - see https://github.com/watson-developer-cloud/node-sdk#authorization\n * @param {String} [options.url='wss://api.us-south.speech-to-text.watson.cloud.ibm.com'] - Base URL for a service instance\n * @param {Boolean} [options.format=true] - pipe the text through a FormatStream which performs light formatting. Also controls smart_formatting option unless explicitly set.\n * @param {Boolean} [options.keepMicrophone=false] - keeps an internal reference to the microphone stream to reuse in subsequent calls (prevents multiple permissions dialogs in firefox)\n * @param {String|DOMElement} [options.outputElement] pipe the text to a [WriteableElementStream](WritableElementStream.html) targeting the specified element. Also defaults objectMode to true to enable interim results.\n * @param {Boolean} [options.extractResults=false] pipe results through a ResultStream stream to simplify the objects. (Default behavior before v0.22) Requires objectMode.\n * @param {Boolean} [options.resultsBySpeaker=false] Pipe results through a SpeakerStream. 
Forces speaker_labels and objectMode to be true.\n * @param {MediaStream} [options.mediaStream] Optionally pass in an existing MediaStream\n *\n * @return {RecognizeStream|SpeakerStream|FormatStream|ResultStream}\n */\n\nmodule.exports = function recognizeMicrophone(options) {\n if (!options || !options.token && !options.accessToken) {\n throw new Error('WatsonSpeechToText: missing required parameter: opts.token (CF) or opts.accessToken (RC)');\n } // the WritableElementStream works best in objectMode\n\n\n if (options.outputElement && options.objectMode !== false) {\n options.objectMode = true;\n } // the ResultExtractor only works in objectMode\n\n\n if (options.extractResults) {\n options.objectMode = true;\n } // SpeakerStream requires objectMode and speakerLabels\n\n\n if (options.resultsBySpeaker) {\n options.objectMode = true;\n options.speakerLabels = true;\n } // default format to true (capitals and periods)\n // default smartFormatting to options.format value (dates, currency, etc.)\n\n\n options.format = options.format !== false;\n\n if (typeof options.smartFormatting === 'undefined') {\n options.smartFormatting = options.format;\n }\n\n var rsOpts = Object.assign({\n contentType: 'audio/l16;rate=16000',\n interimResults: true\n }, options);\n var recognizeStream = new RecognizeStream(rsOpts);\n var streams = [recognizeStream]; // collect all of the streams so that we can bundle up errors and send them to the last one\n // set up the output first so that we have a place to emit errors\n // if there's trouble with the input stream\n\n var stream = recognizeStream;\n var keepMic = options.keepMicrophone;\n var micStream;\n\n if (keepMic && preservedMicStream) {\n preservedMicStream.unpipe(bitBucket);\n micStream = preservedMicStream;\n } else {\n // create the MicrophoneStream synchronously to allow it to resume the context in Safari on iOS 11\n micStream = new MicrophoneStream({\n objectMode: true,\n bufferSize: options.bufferSize\n });\n var pm = options.mediaStream ? 
Promise.resolve(options.mediaStream) : getUserMedia({\n video: false,\n audio: true\n });\n pm.then(function (mediaStream) {\n micStream.setStream(mediaStream);\n\n if (keepMic) {\n preservedMicStream = micStream;\n }\n }).catch(function (err) {\n stream.emit('error', err);\n\n if (err.name === 'NotSupportedError') {\n stream.end(); // end the stream\n }\n });\n }\n\n var l16Stream = new L16({\n writableObjectMode: true\n });\n micStream.pipe(l16Stream).pipe(recognizeStream);\n streams.push(micStream, l16Stream);\n /**\n * unpipes the mic stream to prevent any more audio from being sent over the wire\n * temporarily re-pipes it to the bitBucket (basically /dev/null) becuse\n * otherwise it will buffer the audio from in between calls and prepend it to the next one\n *\n * @private\n */\n\n function end() {\n micStream.unpipe(l16Stream);\n micStream.pipe(bitBucket);\n l16Stream.end();\n } // trigger on both stop and end events:\n // stop will not fire when a stream ends due to a timeout\n // but when stop does fire, we want to honor it immediately\n // end will always fire, but it may take a few moments after stop\n\n\n if (keepMic) {\n recognizeStream.on('end', end);\n recognizeStream.on('stop', end);\n } else {\n recognizeStream.on('end', micStream.stop.bind(micStream));\n recognizeStream.on('stop', micStream.stop.bind(micStream));\n }\n\n if (options.resultsBySpeaker) {\n stream = stream.pipe(new SpeakerStream(options));\n streams.push(stream);\n }\n\n if (options.format) {\n stream = stream.pipe(new FormatStream(options));\n streams.push(stream);\n }\n\n if (options.outputElement) {\n // we don't want to return the WES, just send data to it\n streams.push(stream.pipe(new WritableElementStream(options)));\n }\n\n if (options.extractResults) {\n stream = stream.pipe(new ResultStream());\n streams.push(stream);\n } // Capture errors from any stream except the last one and emit them on the last one\n\n\n streams.forEach(function (prevStream) {\n if (prevStream !== stream) {\n prevStream.on('error', stream.emit.bind(stream, 'error'));\n }\n });\n\n if (stream !== recognizeStream) {\n // add a stop button to whatever the final stream ends up being\n stream.stop = recognizeStream.stop.bind(recognizeStream);\n } // expose the original stream to for debugging (and to support the JSON tab on the STT demo)\n\n\n stream.recognizeStream = recognizeStream;\n return stream;\n};\n\nmodule.exports.isSupported = getUserMedia.isSupported;\n\n//# sourceURL=webpack://WatsonSpeech/./speech-to-text/recognize-microphone.js?")},"./speech-to-text/recognize-stream.js":(module,__unused_webpack_exports,__webpack_require__)=>{"use strict";eval("/* provided dependency */ var process = __webpack_require__(/*! process/browser.js */ \"./node_modules/process/browser.js\");\n/**\n * Copyright 2014 IBM Corp. All Rights Reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\n__webpack_require__(/*! 
core-js/modules/es.string.replace.js */ \"./node_modules/core-js/modules/es.string.replace.js\");\n\nvar {\n Duplex\n} = __webpack_require__(/*! readable-stream */ \"./node_modules/readable-stream/readable-browser.js\");\n\nvar util = __webpack_require__(/*! util */ \"./node_modules/util/util.js\");\n\nvar W3CWebSocket = __webpack_require__(/*! websocket */ \"./node_modules/websocket/lib/browser.js\").w3cwebsocket;\n\nvar contentType = __webpack_require__(/*! ./content-type */ \"./speech-to-text/content-type.js\");\n\nvar processUserParameters = __webpack_require__(/*! ../util/process-user-parameters.js */ \"./util/process-user-parameters.js\");\n\nvar qs = __webpack_require__(/*! ../util/querystring.js */ \"./util/querystring.js\");\n/**\n * pipe()-able Node.js Duplex stream - accepts binary audio and emits text/objects in it's `data` events.\n *\n * Uses WebSockets under the hood. For audio with no recognizable speech, no `data` events are emitted.\n *\n * By default, only finalized text is emitted in the data events, however when `objectMode`/`readableObjectMode` and `interim_results` are enabled, both interim and final results objects are emitted.\n * WriteableElementStream uses this, for example, to live-update the DOM with word-by-word transcriptions.\n *\n * Note that the WebSocket connection is not established until the first chunk of data is recieved. This allows for auto-detection of content type (for wav/flac/opus audio).\n *\n * @param {Options} options\n * @param {string} [options.url] - Base url for service (default='wss://api.us-south.speech-to-text.watson.cloud.ibm.com')\n * @param {OutgoingHttpHeaders} [options.headers] - Only works in Node.js, not in browsers. Allows for custom headers to be set, including an Authorization header (preventing the need for auth tokens)\n * @param {boolean} [options.readableObjectMode] - Emit `result` objects instead of string Buffers for the `data` events. Does not affect input (which must be binary)\n * @param {boolean} [options.objectMode] - Alias for readableObjectMode\n * @param {string} [options.accessToken] - Bearer token to put in query string\n * @param {string} [options.model] - The identifier of the model that is to be used for all recognition requests sent over the connection\n * @param {string} [options.languageCustomizationId] - The customization ID (GUID) of a custom language model that is to be used for all requests sent over the connection\n * @param {string} [options.acousticCustomizationId] - The customization ID (GUID) of a custom acoustic model that is to be used for the request\n * @param {string} [options.baseModelVersion] - The version of the specified base model that is to be used for all requests sent over the connection\n * @param {boolean} [options.xWatsonLearningOptOut] - Indicates whether IBM can use data that is sent over the connection to improve the service for future users (default=false)\n * @param {string} [options.xWatsonMetadata] - Associates a customer ID with all data that is passed over the connection. 
The parameter accepts the argument customer_id={id}, where {id} is a random or generic string that is to be associated with the data\n * @param {string} [options.contentType] - The format (MIME type) of the audio\n * @param {number} [options.customizationWeight] - Tell the service how much weight to give to words from the custom language model compared to those from the base model for the current request\n * @param {number} [options.inactivityTimeout] - The time in seconds after which, if only silence (no speech) is detected in the audio, the connection is closed (default=30)\n * @param {boolean} [options.interimResults] - If true, the service returns interim results as a stream of JSON SpeechRecognitionResults objects (default=false)\n * @param {string[]} [options.keywords] - An array of keyword strings to spot in the audio\n * @param {number} [options.keywordsThreshold] - A confidence value that is the lower bound for spotting a keyword\n * @param {number} [options.maxAlternatives] - The maximum number of alternative transcripts that the service is to return (default=1)\n * @param {number} [options.wordAlternativesThreshold] - A confidence value that is the lower bound for identifying a hypothesis as a possible word alternative\n * @param {boolean} [options.wordConfidence] - If true, the service returns a confidence measure in the range of 0.0 to 1.0 for each word (default=false)\n * @param {boolean} [options.timestamps] - If true, the service returns time alignment for each word (default=false)\n * @param {boolean} [options.profanityFilter] - If true, the service filters profanity from all output except for keyword results by replacing inappropriate words with a series of asterisks (default=true)\n * @param {boolean} [options.smartFormatting] - If true, the service converts dates, times, series of digits and numbers, phone numbers, currency values, and internet addresses into more readable, conventional representations (default=false)\n * @param {boolean} [options.speakerLabels] - If true, the response includes labels that identify which words were spoken by which participants in a multi-person exchange (default=false)\n * @param {string} [options.grammarName] - The name of a grammar that is to be used with the recognition request\n * @param {boolean} [options.redaction] - If true, the service redacts, or masks, numeric data from final transcripts (default=false)\n * @param {boolean} [options.processingMetrics] - If true, requests processing metrics about the service's transcription of the input audio (default=false)\n * @param {number} [options.processingMetricsInterval] - Specifies the interval in seconds at which the service is to return processing metrics\n * @param {boolean} [options.audioMetrics] - If true, requests detailed information about the signal characteristics of the input audio (detailed=false)\n * @param {number} [options.endOfPhraseSilenceTime] - If true, specifies the duration of the pause interval at which the service splits a transcript into multiple final results. Specify a value for the pause interval in the range of 0.0 to 120.0 (default=0.8)\n * @param {boolean} [options.splitTranscriptAtPhraseEnd] - If true, directs the service to split the transcript into multiple final results based on semantic features of the input, for example, at the conclusion of meaningful phrases such as sentences (default=false)\n * @param {number} [options.speechDetectorSensitivity] - The sensitivity of speech activity detection that the service is to perform. 
Specify a value between 0.0 and 1.0 (default=0.5)\n * @param {number} [options.backgroundAudioSuppression] - The level to which the service is to suppress background audio based on its volume to prevent it from being transcribed as speech. Specify a value between 0.0 and 1.0 (default=0.0)\n * @param {boolean} [options.lowLatency] - If `true` for next-generation `Multimedia` and `Telephony` models that support low latency, directs the service to produce results even more quickly than it usually does.\n * @param {number} [options.characterInsertionBias] - Indicate that the service is to favor shorter or longer strings as it considers subsequent characters for its hypotheses. Specify a value between -1.0 and 1.0 (default=0.0)\n*\n * @constructor\n */\n\n\nfunction RecognizeStream(options) {\n // this stream only supports objectMode on the output side.\n // It must receive binary data input.\n if (options.objectMode) {\n