microsoft-cognitiveservices-speech-sdk
Microsoft Cognitive Services Speech SDK for JavaScript
1 line • 52.9 kB
Source Map (JSON)
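What follows is the raw declaration source map: a JSON object whose "mappings" field carries the base64-VLQ position data for the generated ServiceRecognizerBase.d.ts, and whose "sourcesContent" field embeds the original src/common.speech/ServiceRecognizerBase.ts source verbatim. As a quick way to pull that embedded source back out of a map like this one, here is a minimal sketch; the local file name "ServiceRecognizerBase.d.ts.map" is a hypothetical path, and the field names match the JSON shown below.

// Minimal sketch: reading the embedded original source out of a v3 source map.
// "ServiceRecognizerBase.d.ts.map" is a hypothetical local path for illustration.
import { readFileSync } from "fs";

interface RawSourceMap {
    version: number;            // 3 for this map
    sources: string[];          // ["src/common.speech/ServiceRecognizerBase.ts"]
    names: string[];
    mappings: string;           // base64-VLQ segments, not meant to be read by hand
    file: string;               // generated file: "ServiceRecognizerBase.d.ts"
    sourcesContent?: string[];  // original TypeScript source, embedded as strings
}

const map = JSON.parse(
    readFileSync("ServiceRecognizerBase.d.ts.map", "utf8")
) as RawSourceMap;

console.log(`${map.sources[0]} -> ${map.file}`);
// The embedded source uses \r\n line endings; print its first few lines, if present.
console.log(map.sourcesContent?.[0]?.split("\r\n").slice(0, 12).join("\n"));

The embedded source is the implementation of the abstract ServiceRecognizerBase class (connection management, speech.context construction, audio upload, and retry logic), reproduced in full by the compiler when inline sources are enabled.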
{"version":3,"sources":["src/common.speech/ServiceRecognizerBase.ts"],"names":[],"mappings":"AAKA,OAAO,EAGH,eAAe,EAGf,WAAW,EACX,YAAY,EACZ,gBAAgB,EAChB,WAAW,EACX,WAAW,EAGX,YAAY,EAEf,MAAM,sBAAsB,CAAC;AAE9B,OAAO,EAAE,uBAAuB,EAAE,MAAM,mCAAmC,CAAC;AAC5E,OAAO,EACH,qBAAqB,EACrB,kBAAkB,EAGlB,UAAU,EAEV,wBAAwB,EACxB,uBAAuB,EAE1B,MAAM,mBAAmB,CAAC;AAC3B,OAAO,EAAE,QAAQ,EAAE,MAAM,uCAAuC,CAAC;AACjE,OAAO,EACH,WAAW,EACX,qBAAqB,EAErB,eAAe,EACf,cAAc,EACd,aAAa,EAIhB,MAAM,cAAc,CAAC;AACtB,OAAO,EAEH,eAAe,EAClB,MAAM,sBAAsB,CAAC;AAC9B,OAAO,EAAE,kBAAkB,EAAE,MAAM,yBAAyB,CAAC;AAC7D,OAAO,EAAE,gBAAgB,EAAE,MAAM,uBAAuB,CAAC;AACzD,OAAO,EAAE,uBAAuB,EAAE,MAAM,uCAAuC,CAAC;AAEhF,UAAU,WAAW;IACjB,QAAQ,EAAE,MAAM,CAAC;IACjB,QAAQ,EAAE,MAAM,CAAC;CACpB;AAED,MAAM,WAAW,eAAe;IAC5B,YAAY,CAAC,EAAE,WAAW,EAAE,CAAC;IAC7B,SAAS,CAAC,EAAE;QAAE,MAAM,EAAE,MAAM,CAAA;KAAE,CAAC;IAC/B,SAAS,CAAC,EAAE;QAAE,MAAM,EAAE,MAAM,CAAA;KAAE,CAAC;IAC/B,IAAI,CAAC,EAAE,MAAM,CAAC;IACd,WAAW,CAAC,EAAE,YAAY,CAAC;IAC3B,YAAY,CAAC,EAAE,YAAY,CAAC;IAC5B,SAAS,CAAC,EAAE,YAAY,CAAC;IACzB,kBAAkB,CAAC,EAAE,kBAAkB,CAAC;CAC3C;AAED,MAAM,WAAW,kBAAkB;IAC/B,IAAI,CAAC,EAAE,MAAM,CAAC;IACd,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB,aAAa,CAAC,EAAE,MAAM,CAAC;IACvB,gBAAgB,CAAC,EAAE,MAAM,CAAC;IAC1B,oBAAoB,CAAC,EAAE,OAAO,CAAC;CAClC;AAED,MAAM,WAAW,YAAY;IACzB,YAAY,EAAE;QACV,IAAI,EAAE,MAAM,CAAC;QACb,4BAA4B,CAAC,EAAE,MAAM,CAAC;QACtC,2BAA2B,CAAC,EAAE,MAAM,CAAC;KACxC,CAAC;CACL;AAED,8BAAsB,qBAAsB,YAAW,WAAW;IAC9D,OAAO,CAAC,kBAAkB,CAAkB;IAC5C,OAAO,CAAC,qBAAqB,CAAqB;IAIlD,OAAO,CAAC,kCAAkC,CAAmC;IAI7E,OAAO,CAAC,qBAAqB,CAAmC;IAChE,OAAO,CAAC,oBAAoB,CAAS;IACrC,OAAO,CAAC,cAAc,CAAU;IAChC,OAAO,CAAC,yBAAyB,CAAU;IAC3C,OAAO,CAAC,oBAAoB,CAA+B;IAC3D,OAAO,CAAC,iBAAiB,CAA4B;IACrD,OAAO,CAAC,kBAAkB,CAAwB;IAClD,OAAO,CAAC,eAAe,CAAc;IACrC,OAAO,CAAC,yBAAyB,CAAU;IAC3C,OAAO,CAAC,oBAAoB,CAAS;IACrC,OAAO,CAAC,cAAc,CAAyD;IAC/E,OAAO,CAAC,eAAe,CAAe;IACtC,OAAO,CAAC,eAAe,CAAkB;IACzC,OAAO,CAAC,qBAAqB,CAAa;IAC1C,SAAS,CAAC,iBAAiB,EAAE,aAAa,CAAC;IAC3C,SAAS,CAAC,kBAAkB,EAAE,cAAc,CAAC;IAC7C,SAAS,CAAC,gBAAgB,EAAE,MAAM,CAAC;IACnC,SAAS,CAAC,wBAAwB,EAAE,MAAM,CAAC;IAC3C,SAAS,CAAC,oBAAoB,EAAE,gBAAgB,CAAC;IACjD,SAAS,CAAC,cAAc,EAAE,UAAU,CAAC;IACrC,SAAS,CAAC,mBAAmB,EAAE,CAAC,CAAC,EAAE,uBAAuB,KAAK,IAAI,CAAC;IACpE,SAAS,CAAC,iBAAiB,EAAE,CAAC,CAAC,EAAE,MAAM,KAAK,IAAI,CAAC;IACjD,SAAS,CAAC,mBAAmB,EAAE,OAAO,CAAS;IAC/C,SAAS,CAAC,mCAAmC,EAAE,OAAO,CAAS;gBAG3D,cAAc,EAAE,eAAe,EAC/B,iBAAiB,EAAE,kBAAkB,EACrC,WAAW,EAAE,YAAY,EACzB,gBAAgB,EAAE,gBAAgB,EAClC,UAAU,EAAE,UAAU;IAsE1B,SAAS,CAAC,kBAAkB,IAAI,IAAI;IAwBpC,SAAS,CAAC,gCAAgC,IAAI,IAAI;IAoDlD,SAAS,CAAC,iBAAiB,IAAI,IAAI;IAmDnC,SAAS,CAAC,wBAAwB,IAAI,IAAI;IAc1C,IAAW,2BAA2B,IAAI,OAAO,CAEhD;IAED,IAAW,WAAW,IAAI,YAAY,CAErC;IAED,IAAW,aAAa,IAAI,aAAa,CAExC;IAED,IAAW,cAAc,IAAI,qBAAqB,CAEjD;IAED,IAAW,WAAW,IAAI,WAAW,CAEpC;IAED,IAAW,2BAA2B,CAAC,KAAK,EAAE,MAAM,EAEnD;IAED,IAAW,gBAAgB,CAAC,IAAI,EAAE,MAAM,EAEvC;IAED,IAAW,cAAc,CAAC,IAAI,EAAE,eAAe,EAE9C;IAEM,UAAU,IAAI,OAAO;IAIf,OAAO,CAAC,MAAM,CAAC,EAAE,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC;IAapD,IAAW,gBAAgB,IAAI,WAAW,CAAC,eAAe,CAAC,CAE1D;IAED,IAAW,aAAa,IAAI,WAAW,CAAC,YAAY,CAAC,CAEpD;IAED,IAAW,eAAe,IAAI,eAAe,CAE5C;IAED,SAAS,CAAC,iBAAiB,EAAE,CAAC,QAAQ,EAAE,eAAe,EAAE,EAAE,EAAE,CAAC,CAAC,EAAE,uBAAuB,KAAK,IAAI,EAAE,EAAE,EAAE,CAAC,CAAC,EAAE,MAAM,KAAK,IAAI,KAAK,OAAO,CAAC,IAAI,CAAC,CAAa;IAElJ,gBAAgB,EAAE,CAAC,KAAK,EAAE,uBAAuB,KAAK,OAAO,CAAC,wBAAwB,CAAC,CAAa;IAE9F,SAAS,CAClB,QAAQ,EAAE,eAAe,EACzB,eAAe,EAAE,CAAC,CAAC,EAAE,uBAAuB,KAAK,IAAI,EACrD,aAAa,EAAE,CAAC,CAAC,EAAE,MAAM,KAAK,IAAI,GACnC,OAAO,CAAC,IAAI,CAAC;IA4DH,eAAe,IAAI,OAAO,CAAC,IAAI,CAAC;IAchC,OAAO,IAAI,OAAO,CAAC,IAAI,CAAC;IAK9B,YAAY,CAAC,EAAE,CAAC,EAAE,Q
AAQ,EAAE,GAAG,CAAC,EAAE,QAAQ,GAAG,IAAI;IAsBxD,SAAS,CAAC,kBAAkB,EAAE,MAAM,OAAO,CAAC,IAAI,CAAC,CAAa;IAEjD,UAAU,IAAI,OAAO,CAAC,IAAI,CAAC;IAqBxC,OAAc,aAAa,EAAE,CAAC,IAAI,EAAE,MAAM,KAAK,IAAI,CAAC;IACpD,OAAc,oBAAoB,EAAE,OAAO,CAAQ;IAG5C,WAAW,CAAC,OAAO,EAAE,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC;IAIrC,kBAAkB,CAAC,IAAI,EAAE,MAAM,EAAE,OAAO,EAAE,MAAM,GAAG,WAAW,GAAG,OAAO,CAAC,IAAI,CAAC;IAQ3F,IAAW,gBAAgB,CAAC,cAAc,EAAE,MAAM,EAEjD;IAED,IAAW,gBAAgB,IAAI,MAAM,CAEpC;IAED,IAAW,+BAA+B,CAAC,KAAK,EAAE,OAAO,EAExD;IAED,SAAS,CAAC,QAAQ,CAAC,2BAA2B,CAC1C,iBAAiB,EAAE,uBAAuB,EAC1C,eAAe,CAAC,EAAE,CAAC,CAAC,EAAE,uBAAuB,KAAK,IAAI,EACtD,aAAa,CAAC,EAAE,CAAC,CAAC,EAAE,MAAM,KAAK,IAAI,GAAG,OAAO,CAAC,OAAO,CAAC;cAE1C,iBAAiB,IAAI,OAAO,CAAC,IAAI,CAAC;IAyBlD,SAAS,CAAC,QAAQ,CAAC,iBAAiB,CAChC,SAAS,EAAE,MAAM,EACjB,SAAS,EAAE,MAAM,EACjB,kBAAkB,EAAE,kBAAkB,EACtC,SAAS,EAAE,qBAAqB,EAChC,KAAK,EAAE,MAAM,GAAG,IAAI;cAGR,sBAAsB,CAClC,kBAAkB,EAAE,kBAAkB,EACtC,SAAS,EAAE,qBAAqB,EAChC,KAAK,EAAE,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC;IAcjC,SAAS,CAAC,sBAAsB,EAAE,MAAM,OAAO,CAAC,IAAI,CAAC,CAAa;cAElD,cAAc,IAAI,OAAO,CAAC,IAAI,CAAC;IAsF/C,OAAO,CAAC,mCAAmC;IAM3C,SAAS,CAAC,iBAAiB,CAAC,UAAU,EAAE,WAAW,EAAE,oBAAoB,EAAE,OAAO,GAAG,OAAO,CAAC,IAAI,CAAC;IAoBlG,SAAS,CAAC,0BAA0B,EAAE,CAAC,UAAU,EAAE,WAAW,KAAK,OAAO,CAAC,IAAI,CAAC,CAAa;IAE7F,SAAS,CAAC,IAAI,IAAI,OAAO,CAAC,IAAI,CAAC;cAMf,kBAAkB,CAAC,UAAU,EAAE,WAAW,EAAE,oBAAoB,GAAE,OAAc,GAAG,OAAO,CAAC,IAAI,CAAC;cAUhG,cAAc,CAAC,UAAU,EAAE,WAAW,GAAG,OAAO,CAAC,IAAI,CAAC;IAYtE,SAAS,CAAC,uBAAuB,EAAE,CAAC,UAAU,EAAE,OAAO,CAAC,WAAW,CAAC,KAAK,OAAO,CAAC,WAAW,CAAC,CAAa;IAG1G,SAAS,CAAC,WAAW,IAAI,OAAO,CAAC,WAAW,CAAC;IAgC7C,SAAS,CAAC,wBAAwB,EAAE,CAAC,UAAU,EAAE,WAAW,KAAK,OAAO,CAAC,WAAW,CAAC,CAAa;IAClG,SAAS,CAAC,yBAAyB,EAAE,CAAC,QAAQ,EAAE,MAAM,KAAK,OAAO,CAAC,IAAI,CAAC,CAAa;IACrF,SAAS,CAAC,6BAA6B,EAAE,CAAC,QAAQ,EAAE,MAAM,KAAK,IAAI,CAAa;IAEhF,SAAS,CAAC,uBAAuB,CAAC,UAAU,EAAE,WAAW,EAAE,cAAc,EAAE,cAAc,EAAE,uBAAuB,EAAE,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC;cAkC1H,eAAe,IAAI,OAAO,CAAC,WAAW,CAAC;cAsBvC,SAAS,CAAC,eAAe,EAAE,gBAAgB,GAAG,OAAO,CAAC,IAAI,CAAC;YAoF7D,gBAAgB;IAkD9B,OAAO,CAAC,KAAK;IAIb,OAAO,CAAC,oBAAoB;YAoBd,cAAc;YAOd,mBAAmB;CASpC","file":"ServiceRecognizerBase.d.ts","sourcesContent":["// Copyright (c) Microsoft Corporation. 
All rights reserved.\r\n// Licensed under the MIT license.\r\n\r\nimport { ReplayableAudioNode } from \"../common.browser/Exports.js\";\r\nimport { ConnectionOpenResponse } from \"../common/ConnectionOpenResponse.js\";\r\nimport {\r\n ArgumentNullError,\r\n ConnectionClosedEvent,\r\n ConnectionEvent,\r\n ConnectionState,\r\n createNoDashGuid,\r\n EventSource,\r\n IAudioSource,\r\n IAudioStreamNode,\r\n IConnection,\r\n IDisposable,\r\n IStreamChunk,\r\n MessageType,\r\n ServiceEvent,\r\n Timeout\r\n} from \"../common/Exports.js\";\r\nimport { AudioStreamFormatImpl } from \"../sdk/Audio/AudioStreamFormat.js\";\r\nimport { SpeakerRecognitionModel } from \"../sdk/SpeakerRecognitionModel.js\";\r\nimport {\r\n CancellationErrorCode,\r\n CancellationReason,\r\n PropertyId,\r\n RecognitionEventArgs,\r\n Recognizer,\r\n SessionEventArgs,\r\n SpeakerRecognitionResult,\r\n SpeechRecognitionResult,\r\n OutputFormat\r\n} from \"../sdk/Exports.js\";\r\nimport { Callback } from \"../sdk/Transcription/IConversation.js\";\r\nimport {\r\n AgentConfig,\r\n DynamicGrammarBuilder,\r\n ISpeechConfigAudioDevice,\r\n RecognitionMode,\r\n RequestSession,\r\n SpeechContext,\r\n SpeechDetected,\r\n type,\r\n OutputFormatPropertyName\r\n} from \"./Exports.js\";\r\nimport {\r\n AuthInfo,\r\n IAuthentication,\r\n} from \"./IAuthentication.js\";\r\nimport { IConnectionFactory } from \"./IConnectionFactory.js\";\r\nimport { RecognizerConfig } from \"./RecognizerConfig.js\";\r\nimport { SpeechConnectionMessage } from \"./SpeechConnectionMessage.Internal.js\";\r\n\r\ninterface CustomModel {\r\n language: string;\r\n endpoint: string;\r\n}\r\n\r\nexport interface PhraseDetection {\r\n customModels?: CustomModel[];\r\n onInterim?: { action: string };\r\n onSuccess?: { action: string };\r\n mode?: string;\r\n INTERACTIVE?: Segmentation;\r\n CONVERSATION?: Segmentation;\r\n DICTATION?: Segmentation;\r\n speakerDiarization?: SpeakerDiarization;\r\n}\r\n\r\nexport interface SpeakerDiarization {\r\n mode?: string;\r\n audioSessionId?: string;\r\n audioOffsetMs?: number;\r\n identityProvider?: string;\r\n diarizeIntermediates?: boolean;\r\n}\r\n\r\nexport interface Segmentation {\r\n segmentation: {\r\n mode: string;\r\n segmentationSilenceTimeoutMs?: number;\r\n segmentationForcedTimeoutMs?: number;\r\n };\r\n}\r\n\r\nexport abstract class ServiceRecognizerBase implements IDisposable {\r\n private privAuthentication: IAuthentication;\r\n private privConnectionFactory: IConnectionFactory;\r\n\r\n // A promise for a configured connection.\r\n // Do not consume directly, call fetchConnection instead.\r\n private privConnectionConfigurationPromise: Promise<IConnection> = undefined;\r\n\r\n // A promise for a connection, but one that has not had the speech context sent yet.\r\n // Do not consume directly, call fetchConnection instead.\r\n private privConnectionPromise: Promise<IConnection> = undefined;\r\n private privAuthFetchEventId: string;\r\n private privIsDisposed: boolean;\r\n private privMustReportEndOfStream: boolean;\r\n private privConnectionEvents: EventSource<ConnectionEvent>;\r\n private privServiceEvents: EventSource<ServiceEvent>;\r\n private privDynamicGrammar: DynamicGrammarBuilder;\r\n private privAgentConfig: AgentConfig;\r\n private privServiceHasSentMessage: boolean;\r\n private privActivityTemplate: string;\r\n private privSetTimeout: (cb: () => void, delay: number) => number = setTimeout;\r\n private privAudioSource: IAudioSource;\r\n private privIsLiveAudio: boolean = false;\r\n private privAverageBytesPerMs: 
number = 0;\r\n protected privSpeechContext: SpeechContext;\r\n protected privRequestSession: RequestSession;\r\n protected privConnectionId: string;\r\n protected privDiarizationSessionId: string;\r\n protected privRecognizerConfig: RecognizerConfig;\r\n protected privRecognizer: Recognizer;\r\n protected privSuccessCallback: (e: SpeechRecognitionResult) => void;\r\n protected privErrorCallback: (e: string) => void;\r\n protected privEnableSpeakerId: boolean = false;\r\n protected privExpectContentAssessmentResponse: boolean = false;\r\n\r\n public constructor(\r\n authentication: IAuthentication,\r\n connectionFactory: IConnectionFactory,\r\n audioSource: IAudioSource,\r\n recognizerConfig: RecognizerConfig,\r\n recognizer: Recognizer) {\r\n\r\n if (!authentication) {\r\n throw new ArgumentNullError(\"authentication\");\r\n }\r\n\r\n if (!connectionFactory) {\r\n throw new ArgumentNullError(\"connectionFactory\");\r\n }\r\n\r\n if (!audioSource) {\r\n throw new ArgumentNullError(\"audioSource\");\r\n }\r\n\r\n if (!recognizerConfig) {\r\n throw new ArgumentNullError(\"recognizerConfig\");\r\n }\r\n\r\n this.privEnableSpeakerId = recognizerConfig.isSpeakerDiarizationEnabled;\r\n this.privMustReportEndOfStream = false;\r\n this.privAuthentication = authentication;\r\n this.privConnectionFactory = connectionFactory;\r\n this.privAudioSource = audioSource;\r\n this.privRecognizerConfig = recognizerConfig;\r\n this.privIsDisposed = false;\r\n this.privRecognizer = recognizer;\r\n this.privRequestSession = new RequestSession(this.privAudioSource.id());\r\n this.privConnectionEvents = new EventSource<ConnectionEvent>();\r\n this.privServiceEvents = new EventSource<ServiceEvent>();\r\n this.privDynamicGrammar = new DynamicGrammarBuilder();\r\n this.privSpeechContext = new SpeechContext(this.privDynamicGrammar);\r\n this.privAgentConfig = new AgentConfig();\r\n const webWorkerLoadType: string = this.privRecognizerConfig.parameters.getProperty(PropertyId.WebWorkerLoadType, \"on\").toLowerCase();\r\n if (webWorkerLoadType === \"on\" && typeof (Blob) !== \"undefined\" && typeof (Worker) !== \"undefined\") {\r\n this.privSetTimeout = Timeout.setTimeout;\r\n } else {\r\n if (typeof window !== \"undefined\") {\r\n // eslint-disable-next-line @typescript-eslint/no-unsafe-assignment\r\n this.privSetTimeout = window.setTimeout.bind(window);\r\n }\r\n if (typeof globalThis !== \"undefined\") {\r\n // eslint-disable-next-line @typescript-eslint/no-unsafe-assignment\r\n this.privSetTimeout = globalThis.setTimeout.bind(globalThis);\r\n }\r\n }\r\n\r\n this.connectionEvents.attach((connectionEvent: ConnectionEvent): void => {\r\n if (connectionEvent.name === \"ConnectionClosedEvent\") {\r\n const connectionClosedEvent = connectionEvent as ConnectionClosedEvent;\r\n if (connectionClosedEvent.statusCode === 1003 ||\r\n connectionClosedEvent.statusCode === 1007 ||\r\n connectionClosedEvent.statusCode === 1002 ||\r\n connectionClosedEvent.statusCode === 4000 ||\r\n this.privRequestSession.numConnectionAttempts > this.privRecognizerConfig.maxRetryCount\r\n ) {\r\n void this.cancelRecognitionLocal(CancellationReason.Error,\r\n connectionClosedEvent.statusCode === 1007 ? 
CancellationErrorCode.BadRequestParameters : CancellationErrorCode.ConnectionFailure,\r\n `${connectionClosedEvent.reason} websocket error code: ${connectionClosedEvent.statusCode}`);\r\n }\r\n }\r\n });\r\n\r\n if (this.privEnableSpeakerId) {\r\n this.privDiarizationSessionId = createNoDashGuid();\r\n }\r\n\r\n this.setLanguageIdJson();\r\n this.setOutputDetailLevelJson();\r\n }\r\n\r\n protected setTranslationJson(): void {\r\n const targetLanguages: string = this.privRecognizerConfig.parameters.getProperty(PropertyId.SpeechServiceConnection_TranslationToLanguages, undefined);\r\n if (targetLanguages !== undefined) {\r\n const languages = targetLanguages.split(\",\");\r\n const translationVoice: string = this.privRecognizerConfig.parameters.getProperty(PropertyId.SpeechServiceConnection_TranslationVoice, undefined);\r\n const action = ( translationVoice !== undefined ) ? \"Synthesize\" : \"None\";\r\n this.privSpeechContext.setSection(\"translation\", {\r\n onSuccess: { action },\r\n output: { interimResults: { mode: \"Always\" } },\r\n targetLanguages: languages,\r\n });\r\n\r\n if (translationVoice !== undefined) {\r\n const languageToVoiceMap: { [key: string]: string } = {};\r\n for (const lang of languages) {\r\n languageToVoiceMap[lang] = translationVoice;\r\n }\r\n this.privSpeechContext.setSection(\"synthesis\", {\r\n defaultVoices: languageToVoiceMap\r\n });\r\n }\r\n }\r\n }\r\n\r\n protected setSpeechSegmentationTimeoutJson(): void {\r\n const speechSegmentationSilenceTimeoutMs: string = this.privRecognizerConfig.parameters.getProperty(PropertyId.Speech_SegmentationSilenceTimeoutMs, undefined);\r\n const speechSegmentationMaximumTimeMs: string = this.privRecognizerConfig.parameters.getProperty(PropertyId.Speech_SegmentationMaximumTimeMs, undefined);\r\n const speechSegmentationStrategy: string = this.privRecognizerConfig.parameters.getProperty(PropertyId.Speech_SegmentationStrategy, undefined);\r\n const segmentation: Segmentation = {\r\n segmentation: {\r\n mode: \"\"\r\n }\r\n };\r\n let configuredSegment = false;\r\n\r\n if (speechSegmentationStrategy !== undefined) {\r\n configuredSegment = true;\r\n let segMode: string = \"\";\r\n switch (speechSegmentationStrategy.toLowerCase()) {\r\n case \"default\":\r\n break;\r\n case \"time\":\r\n segMode = \"Custom\";\r\n break;\r\n case \"semantic\":\r\n segMode = \"Semantic\";\r\n break;\r\n }\r\n\r\n segmentation.segmentation.mode = segMode;\r\n }\r\n\r\n if (speechSegmentationSilenceTimeoutMs !== undefined) {\r\n configuredSegment = true;\r\n const segmentationSilenceTimeoutMs: number = parseInt(speechSegmentationSilenceTimeoutMs, 10);\r\n segmentation.segmentation.mode = \"Custom\";\r\n segmentation.segmentation.segmentationSilenceTimeoutMs = segmentationSilenceTimeoutMs;\r\n }\r\n\r\n if (speechSegmentationMaximumTimeMs !== undefined) {\r\n configuredSegment = true;\r\n const segmentationMaximumTimeMs: number = parseInt(speechSegmentationMaximumTimeMs, 10);\r\n segmentation.segmentation.mode = \"Custom\";\r\n segmentation.segmentation.segmentationForcedTimeoutMs = segmentationMaximumTimeMs;\r\n }\r\n\r\n if (configuredSegment) {\r\n const recoMode = this.recognitionMode === RecognitionMode.Conversation ? \"CONVERSATION\" :\r\n this.recognitionMode === RecognitionMode.Dictation ? 
\"DICTATION\" : \"INTERACTIVE\";\r\n const phraseDetection = this.privSpeechContext.getSection(\"phraseDetection\") as PhraseDetection;\r\n phraseDetection.mode = recoMode;\r\n phraseDetection[recoMode] = segmentation;\r\n this.privSpeechContext.setSection(\"phraseDetection\", phraseDetection);\r\n }\r\n }\r\n\r\n protected setLanguageIdJson(): void {\r\n const phraseDetection = this.privSpeechContext.getSection(\"phraseDetection\") as PhraseDetection;\r\n if (this.privRecognizerConfig.autoDetectSourceLanguages !== undefined) {\r\n const sourceLanguages: string[] = this.privRecognizerConfig.autoDetectSourceLanguages.split(\",\");\r\n\r\n let speechContextLidMode;\r\n if (this.privRecognizerConfig.languageIdMode === \"Continuous\") {\r\n speechContextLidMode = \"DetectContinuous\";\r\n } else {// recognizerConfig.languageIdMode === \"AtStart\"\r\n speechContextLidMode = \"DetectAtAudioStart\";\r\n }\r\n\r\n this.privSpeechContext.setSection(\"languageId\", {\r\n Priority: \"PrioritizeLatency\",\r\n languages: sourceLanguages,\r\n mode: speechContextLidMode,\r\n onSuccess: { action: \"Recognize\" },\r\n onUnknown: { action: \"None\" }\r\n });\r\n this.privSpeechContext.setSection(\"phraseOutput\", {\r\n interimResults: {\r\n resultType: \"Auto\"\r\n },\r\n phraseResults: {\r\n resultType: \"Always\"\r\n }\r\n });\r\n const customModels: CustomModel[] = this.privRecognizerConfig.sourceLanguageModels;\r\n if (customModels !== undefined) {\r\n phraseDetection.customModels = customModels;\r\n phraseDetection.onInterim = { action: \"None\" };\r\n phraseDetection.onSuccess = { action: \"None\" };\r\n }\r\n }\r\n const targetLanguages: string = this.privRecognizerConfig.parameters.getProperty(PropertyId.SpeechServiceConnection_TranslationToLanguages, undefined);\r\n if (targetLanguages !== undefined) {\r\n phraseDetection.onInterim = { action: \"Translate\" };\r\n phraseDetection.onSuccess = { action: \"Translate\" };\r\n this.privSpeechContext.setSection(\"phraseOutput\", {\r\n interimResults: {\r\n resultType: \"None\"\r\n },\r\n phraseResults: {\r\n resultType: \"None\"\r\n }\r\n });\r\n }\r\n\r\n this.privSpeechContext.setSection(\"phraseDetection\", phraseDetection);\r\n }\r\n\r\n protected setOutputDetailLevelJson(): void {\r\n if (this.privEnableSpeakerId) {\r\n const requestWordLevelTimestamps: string = this.privRecognizerConfig.parameters.getProperty(PropertyId.SpeechServiceResponse_RequestWordLevelTimestamps, \"false\").toLowerCase();\r\n if (requestWordLevelTimestamps === \"true\") {\r\n this.privSpeechContext.setWordLevelTimings();\r\n } else {\r\n const outputFormat: string = this.privRecognizerConfig.parameters.getProperty(OutputFormatPropertyName, OutputFormat[OutputFormat.Simple]).toLowerCase();\r\n if (outputFormat === OutputFormat[OutputFormat.Detailed].toLocaleLowerCase()) {\r\n this.privSpeechContext.setDetailedOutputFormat();\r\n }\r\n }\r\n }\r\n }\r\n\r\n public get isSpeakerDiarizationEnabled(): boolean {\r\n return this.privEnableSpeakerId;\r\n }\r\n\r\n public get audioSource(): IAudioSource {\r\n return this.privAudioSource;\r\n }\r\n\r\n public get speechContext(): SpeechContext {\r\n return this.privSpeechContext;\r\n }\r\n\r\n public get dynamicGrammar(): DynamicGrammarBuilder {\r\n return this.privDynamicGrammar;\r\n }\r\n\r\n public get agentConfig(): AgentConfig {\r\n return this.privAgentConfig;\r\n }\r\n\r\n public set conversationTranslatorToken(token: string) {\r\n this.privRecognizerConfig.parameters.setProperty(PropertyId.ConversationTranslator_Token, 
token);\r\n }\r\n\r\n public set voiceProfileType(type: string) {\r\n this.privRecognizerConfig.parameters.setProperty(PropertyId.SpeechServiceConnection_SpeakerIdMode, type);\r\n }\r\n\r\n public set authentication(auth: IAuthentication) {\r\n this.privAuthentication = auth;\r\n }\r\n\r\n public isDisposed(): boolean {\r\n return this.privIsDisposed;\r\n }\r\n\r\n public async dispose(reason?: string): Promise<void> {\r\n this.privIsDisposed = true;\r\n if (this.privConnectionConfigurationPromise !== undefined) {\r\n try {\r\n const connection: IConnection = await this.privConnectionConfigurationPromise;\r\n await connection.dispose(reason);\r\n } catch (error) {\r\n // The connection is in a bad state. But we're trying to kill it, so...\r\n return;\r\n }\r\n }\r\n }\r\n\r\n public get connectionEvents(): EventSource<ConnectionEvent> {\r\n return this.privConnectionEvents;\r\n }\r\n\r\n public get serviceEvents(): EventSource<ServiceEvent> {\r\n return this.privServiceEvents;\r\n }\r\n\r\n public get recognitionMode(): RecognitionMode {\r\n return this.privRecognizerConfig.recognitionMode;\r\n }\r\n\r\n protected recognizeOverride: (recoMode: RecognitionMode, sc: (e: SpeechRecognitionResult) => void, ec: (e: string) => void) => Promise<void> = undefined;\r\n\r\n public recognizeSpeaker: (model: SpeakerRecognitionModel) => Promise<SpeakerRecognitionResult> = undefined;\r\n\r\n public async recognize(\r\n recoMode: RecognitionMode,\r\n successCallback: (e: SpeechRecognitionResult) => void,\r\n errorCallBack: (e: string) => void,\r\n ): Promise<void> {\r\n\r\n if (this.recognizeOverride !== undefined) {\r\n await this.recognizeOverride(recoMode, successCallback, errorCallBack);\r\n return;\r\n }\r\n // Clear the existing configuration promise to force a re-transmission of config and context.\r\n this.privConnectionConfigurationPromise = undefined;\r\n this.privRecognizerConfig.recognitionMode = recoMode;\r\n this.setSpeechSegmentationTimeoutJson();\r\n this.setTranslationJson();\r\n\r\n this.privSuccessCallback = successCallback;\r\n this.privErrorCallback = errorCallBack;\r\n\r\n this.privRequestSession.startNewRecognition();\r\n this.privRequestSession.listenForServiceTelemetry(this.privAudioSource.events);\r\n\r\n // Start the connection to the service. 
The promise this will create is stored and will be used by configureConnection().\r\n const conPromise: Promise<IConnection> = this.connectImpl();\r\n let audioNode: ReplayableAudioNode;\r\n\r\n try {\r\n const audioStreamNode: IAudioStreamNode = await this.audioSource.attach(this.privRequestSession.audioNodeId);\r\n const format: AudioStreamFormatImpl = await this.audioSource.format;\r\n const deviceInfo: ISpeechConfigAudioDevice = await this.audioSource.deviceInfo;\r\n this.privIsLiveAudio = deviceInfo.type && deviceInfo.type === type.Microphones;\r\n\r\n audioNode = new ReplayableAudioNode(audioStreamNode, format.avgBytesPerSec);\r\n await this.privRequestSession.onAudioSourceAttachCompleted(audioNode, false);\r\n this.privRecognizerConfig.SpeechServiceConfig.Context.audio = { source: deviceInfo };\r\n\r\n } catch (error) {\r\n await this.privRequestSession.onStopRecognizing();\r\n throw error;\r\n }\r\n\r\n try {\r\n await conPromise;\r\n } catch (error) {\r\n await this.cancelRecognitionLocal(CancellationReason.Error, CancellationErrorCode.ConnectionFailure, error as string);\r\n return;\r\n }\r\n\r\n const sessionStartEventArgs: SessionEventArgs = new SessionEventArgs(this.privRequestSession.sessionId);\r\n\r\n if (!!this.privRecognizer.sessionStarted) {\r\n this.privRecognizer.sessionStarted(this.privRecognizer, sessionStartEventArgs);\r\n }\r\n\r\n void this.receiveMessage();\r\n const audioSendPromise = this.sendAudio(audioNode);\r\n\r\n audioSendPromise.catch(async (error: string): Promise<void> => {\r\n await this.cancelRecognitionLocal(CancellationReason.Error, CancellationErrorCode.RuntimeError, error);\r\n });\r\n\r\n return;\r\n }\r\n\r\n public async stopRecognizing(): Promise<void> {\r\n if (this.privRequestSession.isRecognizing) {\r\n try {\r\n await this.audioSource.turnOff();\r\n await this.sendFinalAudio();\r\n await this.privRequestSession.onStopRecognizing();\r\n await this.privRequestSession.turnCompletionPromise;\r\n } finally {\r\n await this.privRequestSession.dispose();\r\n }\r\n }\r\n return;\r\n }\r\n\r\n public async connect(): Promise<void> {\r\n await this.connectImpl();\r\n return Promise.resolve();\r\n }\r\n\r\n public connectAsync(cb?: Callback, err?: Callback): void {\r\n this.connectImpl().then((): void => {\r\n try {\r\n if (!!cb) {\r\n cb();\r\n }\r\n } catch (e) {\r\n if (!!err) {\r\n err(e);\r\n }\r\n }\r\n }, (reason: any): void => {\r\n try {\r\n if (!!err) {\r\n err(reason);\r\n }\r\n /* eslint-disable no-empty */\r\n } catch (error) {\r\n }\r\n });\r\n }\r\n\r\n protected disconnectOverride: () => Promise<void> = undefined;\r\n\r\n public async disconnect(): Promise<void> {\r\n await this.cancelRecognitionLocal(CancellationReason.Error,\r\n CancellationErrorCode.NoError,\r\n \"Disconnecting\");\r\n\r\n if (this.disconnectOverride !== undefined) {\r\n await this.disconnectOverride();\r\n }\r\n\r\n if (this.privConnectionPromise !== undefined) {\r\n try {\r\n await (await this.privConnectionPromise).dispose();\r\n } catch (error) {\r\n\r\n }\r\n }\r\n this.privConnectionPromise = undefined;\r\n }\r\n\r\n // Called when telemetry data is sent to the service.\r\n // Used for testing Telemetry capture.\r\n public static telemetryData: (json: string) => void;\r\n public static telemetryDataEnabled: boolean = true;\r\n\r\n // eslint-disable-next-line @typescript-eslint/no-unused-vars\r\n public sendMessage(message: string): Promise<void> {\r\n return;\r\n }\r\n\r\n public async sendNetworkMessage(path: string, payload: string | ArrayBuffer): 
Promise<void> {\r\n const type: MessageType = typeof payload === \"string\" ? MessageType.Text : MessageType.Binary;\r\n const contentType: string = typeof payload === \"string\" ? \"application/json\" : \"\";\r\n\r\n const connection: IConnection = await this.fetchConnection();\r\n return connection.send(new SpeechConnectionMessage(type, path, this.privRequestSession.requestId, contentType, payload));\r\n }\r\n\r\n public set activityTemplate(messagePayload: string) {\r\n this.privActivityTemplate = messagePayload;\r\n }\r\n\r\n public get activityTemplate(): string {\r\n return this.privActivityTemplate;\r\n }\r\n\r\n public set expectContentAssessmentResponse(value: boolean) {\r\n this.privExpectContentAssessmentResponse = value;\r\n }\r\n\r\n protected abstract processTypeSpecificMessages(\r\n connectionMessage: SpeechConnectionMessage,\r\n successCallback?: (e: SpeechRecognitionResult) => void,\r\n errorCallBack?: (e: string) => void): Promise<boolean>;\r\n\r\n protected async sendTelemetryData(): Promise<void> {\r\n const telemetryData = this.privRequestSession.getTelemetry();\r\n if (ServiceRecognizerBase.telemetryDataEnabled !== true ||\r\n this.privIsDisposed ||\r\n null === telemetryData) {\r\n return;\r\n }\r\n\r\n if (!!ServiceRecognizerBase.telemetryData) {\r\n try {\r\n ServiceRecognizerBase.telemetryData(telemetryData);\r\n /* eslint-disable no-empty */\r\n } catch { }\r\n }\r\n\r\n const connection: IConnection = await this.fetchConnection();\r\n await connection.send(new SpeechConnectionMessage(\r\n MessageType.Text,\r\n \"telemetry\",\r\n this.privRequestSession.requestId,\r\n \"application/json\",\r\n telemetryData));\r\n }\r\n\r\n // Cancels recognition.\r\n protected abstract cancelRecognition(\r\n sessionId: string,\r\n requestId: string,\r\n cancellationReason: CancellationReason,\r\n errorCode: CancellationErrorCode,\r\n error: string): void;\r\n\r\n // Cancels recognition.\r\n protected async cancelRecognitionLocal(\r\n cancellationReason: CancellationReason,\r\n errorCode: CancellationErrorCode,\r\n error: string): Promise<void> {\r\n\r\n if (!!this.privRequestSession.isRecognizing) {\r\n await this.privRequestSession.onStopRecognizing();\r\n\r\n this.cancelRecognition(\r\n this.privRequestSession.sessionId,\r\n this.privRequestSession.requestId,\r\n cancellationReason,\r\n errorCode,\r\n error);\r\n }\r\n }\r\n\r\n protected receiveMessageOverride: () => Promise<void> = undefined;\r\n\r\n protected async receiveMessage(): Promise<void> {\r\n try {\r\n if (this.privIsDisposed) {\r\n // We're done.\r\n return;\r\n }\r\n\r\n let connection = await this.fetchConnection();\r\n const message = await connection.read();\r\n\r\n if (this.receiveMessageOverride !== undefined) {\r\n return this.receiveMessageOverride();\r\n }\r\n\r\n // indicates we are draining the queue and it came with no message;\r\n if (!message) {\r\n return this.receiveMessage();\r\n }\r\n\r\n this.privServiceHasSentMessage = true;\r\n const connectionMessage = SpeechConnectionMessage.fromConnectionMessage(message);\r\n\r\n if (connectionMessage.requestId.toLowerCase() === this.privRequestSession.requestId.toLowerCase()) {\r\n switch (connectionMessage.path.toLowerCase()) {\r\n case \"turn.start\":\r\n this.privMustReportEndOfStream = true;\r\n this.privRequestSession.onServiceTurnStartResponse();\r\n break;\r\n\r\n case \"speech.startdetected\":\r\n const speechStartDetected: SpeechDetected = SpeechDetected.fromJSON(connectionMessage.textBody, this.privRequestSession.currentTurnAudioOffset);\r\n 
const speechStartEventArgs = new RecognitionEventArgs(speechStartDetected.Offset, this.privRequestSession.sessionId);\r\n if (!!this.privRecognizer.speechStartDetected) {\r\n this.privRecognizer.speechStartDetected(this.privRecognizer, speechStartEventArgs);\r\n }\r\n break;\r\n\r\n case \"speech.enddetected\":\r\n let json: string;\r\n if (connectionMessage.textBody.length > 0) {\r\n json = connectionMessage.textBody;\r\n } else {\r\n // If the request was empty, the JSON returned is empty.\r\n json = \"{ Offset: 0 }\";\r\n }\r\n const speechStopDetected: SpeechDetected = SpeechDetected.fromJSON(json, this.privRequestSession.currentTurnAudioOffset);\r\n const speechStopEventArgs = new RecognitionEventArgs(speechStopDetected.Offset + this.privRequestSession.currentTurnAudioOffset, this.privRequestSession.sessionId);\r\n if (!!this.privRecognizer.speechEndDetected) {\r\n this.privRecognizer.speechEndDetected(this.privRecognizer, speechStopEventArgs);\r\n }\r\n break;\r\n\r\n case \"turn.end\":\r\n await this.sendTelemetryData();\r\n if (this.privRequestSession.isSpeechEnded && this.privMustReportEndOfStream) {\r\n this.privMustReportEndOfStream = false;\r\n await this.cancelRecognitionLocal(CancellationReason.EndOfStream, CancellationErrorCode.NoError, undefined);\r\n }\r\n const sessionStopEventArgs: SessionEventArgs = new SessionEventArgs(this.privRequestSession.sessionId);\r\n await this.privRequestSession.onServiceTurnEndResponse(this.privRecognizerConfig.isContinuousRecognition);\r\n if (!this.privRecognizerConfig.isContinuousRecognition || this.privRequestSession.isSpeechEnded || !this.privRequestSession.isRecognizing) {\r\n if (!!this.privRecognizer.sessionStopped) {\r\n this.privRecognizer.sessionStopped(this.privRecognizer, sessionStopEventArgs);\r\n }\r\n return;\r\n } else {\r\n connection = await this.fetchConnection();\r\n await this.sendPrePayloadJSON(connection);\r\n }\r\n break;\r\n\r\n default:\r\n if (!await this.processTypeSpecificMessages(connectionMessage)) {\r\n // here are some messages that the derived class has not processed, dispatch them to connect class\r\n if (!!this.privServiceEvents) {\r\n this.serviceEvents.onEvent(new ServiceEvent(connectionMessage.path.toLowerCase(), connectionMessage.textBody));\r\n }\r\n }\r\n }\r\n }\r\n return this.receiveMessage();\r\n } catch (error) {\r\n return null;\r\n }\r\n }\r\n\r\n private updateSpeakerDiarizationAudioOffset(): void {\r\n const bytesSent: number = this.privRequestSession.recognitionBytesSent;\r\n const audioOffsetMs: number = this.privAverageBytesPerMs !== 0 ? 
bytesSent / this.privAverageBytesPerMs : 0;\r\n this.privSpeechContext.setSpeakerDiarizationAudioOffsetMs(audioOffsetMs);\r\n }\r\n\r\n protected sendSpeechContext(connection: IConnection, generateNewRequestId: boolean): Promise<void> {\r\n if (this.privEnableSpeakerId) {\r\n this.updateSpeakerDiarizationAudioOffset();\r\n }\r\n const speechContextJson = this.speechContext.toJSON();\r\n if (generateNewRequestId) {\r\n this.privRequestSession.onSpeechContext();\r\n }\r\n\r\n if (speechContextJson) {\r\n return connection.send(new SpeechConnectionMessage(\r\n MessageType.Text,\r\n \"speech.context\",\r\n this.privRequestSession.requestId,\r\n \"application/json\",\r\n speechContextJson));\r\n }\r\n return;\r\n }\r\n\r\n protected sendPrePayloadJSONOverride: (connection: IConnection) => Promise<void> = undefined;\r\n\r\n protected noOp(): Promise<void> {\r\n // operation not supported\r\n return;\r\n }\r\n\r\n // Encapsulated for derived service recognizers that need to send additional JSON\r\n protected async sendPrePayloadJSON(connection: IConnection, generateNewRequestId: boolean = true): Promise<void> {\r\n if (this.sendPrePayloadJSONOverride !== undefined) {\r\n return this.sendPrePayloadJSONOverride(connection);\r\n }\r\n\r\n await this.sendSpeechContext(connection, generateNewRequestId);\r\n await this.sendWaveHeader(connection);\r\n return;\r\n }\r\n\r\n protected async sendWaveHeader(connection: IConnection): Promise<void> {\r\n const format: AudioStreamFormatImpl = await this.audioSource.format;\r\n // this.writeBufferToConsole(format.header);\r\n return connection.send(new SpeechConnectionMessage(\r\n MessageType.Binary,\r\n \"audio\",\r\n this.privRequestSession.requestId,\r\n \"audio/x-wav\",\r\n format.header\r\n ));\r\n }\r\n\r\n protected postConnectImplOverride: (connection: Promise<IConnection>) => Promise<IConnection> = undefined;\r\n\r\n // Establishes a websocket connection to the end point.\r\n protected connectImpl(): Promise<IConnection> {\r\n if (this.privConnectionPromise !== undefined) {\r\n return this.privConnectionPromise.then((connection: IConnection): Promise<IConnection> => {\r\n if (connection.state() === ConnectionState.Disconnected) {\r\n this.privConnectionId = null;\r\n this.privConnectionPromise = undefined;\r\n this.privServiceHasSentMessage = false;\r\n return this.connectImpl();\r\n }\r\n return this.privConnectionPromise;\r\n }, (): Promise<IConnection> => {\r\n this.privConnectionId = null;\r\n this.privConnectionPromise = undefined;\r\n this.privServiceHasSentMessage = false;\r\n return this.connectImpl();\r\n });\r\n }\r\n\r\n this.privConnectionPromise = this.retryableConnect();\r\n\r\n // Attach an empty handler to allow the promise to run in the background while\r\n // other startup events happen. 
It'll eventually be awaited on.\r\n // eslint-disable-next-line @typescript-eslint/no-empty-function\r\n this.privConnectionPromise.catch((): void => { });\r\n\r\n if (this.postConnectImplOverride !== undefined) {\r\n return this.postConnectImplOverride(this.privConnectionPromise);\r\n }\r\n\r\n return this.privConnectionPromise;\r\n }\r\n\r\n protected configConnectionOverride: (connection: IConnection) => Promise<IConnection> = undefined;\r\n protected handleSpeechPhraseMessage: (textBody: string) => Promise<void> = undefined;\r\n protected handleSpeechHypothesisMessage: (textBody: string) => void = undefined;\r\n\r\n protected sendSpeechServiceConfig(connection: IConnection, requestSession: RequestSession, SpeechServiceConfigJson: string): Promise<void> {\r\n requestSession.onSpeechContext();\r\n // filter out anything that is not required for the service to work.\r\n if (ServiceRecognizerBase.telemetryDataEnabled !== true) {\r\n const withTelemetry: { context: { system: string } } = JSON.parse(SpeechServiceConfigJson) as { context: { system: string } };\r\n\r\n const replacement: any = {\r\n context: {\r\n system: withTelemetry.context.system,\r\n },\r\n };\r\n\r\n SpeechServiceConfigJson = JSON.stringify(replacement);\r\n }\r\n\r\n if (this.privRecognizerConfig.parameters.getProperty(\"f0f5debc-f8c9-4892-ac4b-90a7ab359fd2\", \"false\").toLowerCase() === \"true\") {\r\n const json: { context: { DisableReferenceChannel: string; MicSpec: string } } = JSON.parse(SpeechServiceConfigJson) as { context: { DisableReferenceChannel: string; MicSpec: string } };\r\n json.context.DisableReferenceChannel = \"True\";\r\n json.context.MicSpec = \"1_0_0\";\r\n SpeechServiceConfigJson = JSON.stringify(json);\r\n }\r\n\r\n if (SpeechServiceConfigJson) {\r\n return connection.send(new SpeechConnectionMessage(\r\n MessageType.Text,\r\n \"speech.config\",\r\n requestSession.requestId,\r\n \"application/json\",\r\n SpeechServiceConfigJson));\r\n }\r\n\r\n return;\r\n }\r\n\r\n protected async fetchConnection(): Promise<IConnection> {\r\n if (this.privConnectionConfigurationPromise !== undefined) {\r\n return this.privConnectionConfigurationPromise.then((connection: IConnection): Promise<IConnection> => {\r\n if (connection.state() === ConnectionState.Disconnected) {\r\n this.privConnectionId = null;\r\n this.privConnectionConfigurationPromise = undefined;\r\n this.privServiceHasSentMessage = false;\r\n return this.fetchConnection();\r\n }\r\n return this.privConnectionConfigurationPromise;\r\n }, (): Promise<IConnection> => {\r\n this.privConnectionId = null;\r\n this.privConnectionConfigurationPromise = undefined;\r\n this.privServiceHasSentMessage = false;\r\n return this.fetchConnection();\r\n });\r\n }\r\n\r\n this.privConnectionConfigurationPromise = this.configureConnection();\r\n return await this.privConnectionConfigurationPromise;\r\n }\r\n\r\n protected async sendAudio(audioStreamNode: IAudioStreamNode): Promise<void> {\r\n const audioFormat: AudioStreamFormatImpl = await this.audioSource.format;\r\n this.privAverageBytesPerMs = audioFormat.avgBytesPerSec / 1000;\r\n // The time we last sent data to the service.\r\n let nextSendTime: number = Date.now();\r\n\r\n // Max amount to send before we start to throttle\r\n const fastLaneSizeMs: string = this.privRecognizerConfig.parameters.getProperty(\"SPEECH-TransmitLengthBeforThrottleMs\", \"5000\");\r\n const maxSendUnthrottledBytes: number = audioFormat.avgBytesPerSec / 1000 * parseInt(fastLaneSizeMs, 10);\r\n const startRecogNumber: number = 
this.privRequestSession.recogNumber;\r\n\r\n const readAndUploadCycle = async (): Promise<void> => {\r\n // If speech is done, stop sending audio.\r\n if (!this.privIsDisposed &&\r\n !this.privRequestSession.isSpeechEnded &&\r\n this.privRequestSession.isRecognizing &&\r\n this.privRequestSession.recogNumber === startRecogNumber) {\r\n\r\n const connection: IConnection = await this.fetchConnection();\r\n const audioStreamChunk: IStreamChunk<ArrayBuffer> = await audioStreamNode.read();\r\n // we have a new audio chunk to upload.\r\n if (this.privRequestSession.isSpeechEnded) {\r\n // If service already recognized audio end then don't send any more audio\r\n return;\r\n }\r\n\r\n let payload: ArrayBuffer;\r\n let sendDelay: number;\r\n\r\n if (!audioStreamChunk || audioStreamChunk.isEnd) {\r\n payload = null;\r\n sendDelay = 0;\r\n } else {\r\n payload = audioStreamChunk.buffer;\r\n\r\n this.privRequestSession.onAudioSent(payload.byteLength);\r\n\r\n if (maxSendUnthrottledBytes >= this.privRequestSession.bytesSent) {\r\n sendDelay = 0;\r\n } else {\r\n sendDelay = Math.max(0, nextSendTime - Date.now());\r\n }\r\n }\r\n\r\n if (0 !== sendDelay) {\r\n await this.delay(sendDelay);\r\n }\r\n\r\n if (payload !== null) {\r\n nextSendTime = Date.now() + (payload.byteLength * 1000 / (audioFormat.avgBytesPerSec * 2));\r\n }\r\n\r\n // Are we still alive?\r\n if (!this.privIsDisposed &&\r\n !this.privRequestSession.isSpeechEnded &&\r\n this.privRequestSession.isRecognizing &&\r\n this.privRequestSession.recogNumber === startRecogNumber) {\r\n connection.send(\r\n new SpeechConnectionMessage(MessageType.Binary, \"audio\", this.privRequestSession.requestId, null, payload)\r\n ).catch((): void => {\r\n // eslint-disable-next-line @typescript-eslint/no-empty-function\r\n this.privRequestSession.onServiceTurnEndResponse(this.privRecognizerConfig.isContinuousRecognition).catch((): void => { });\r\n });\r\n\r\n if (!audioStreamChunk?.isEnd) {\r\n // this.writeBufferToConsole(payload);\r\n // Regardless of success or failure, schedule the next upload.\r\n // If the underlying connection was broken, the next cycle will\r\n // get a new connection and re-transmit missing audio automatically.\r\n return readAndUploadCycle();\r\n } else {\r\n // the audio stream has been closed, no need to schedule next\r\n // read-upload cycle.\r\n if (!this.privIsLiveAudio) {\r\n this.privRequestSession.onSpeechEnded();\r\n }\r\n }\r\n }\r\n }\r\n };\r\n\r\n return readAndUploadCycle();\r\n }\r\n\r\n private async retryableConnect(): Promise<IConnection> {\r\n let isUnAuthorized: boolean = false;\r\n\r\n this.privAuthFetchEventId = createNoDashGuid();\r\n const sessionId: string = this.privRequestSession.sessionId;\r\n this.privConnectionId = (sessionId !== undefined) ? sessionId : createNoDashGuid();\r\n\r\n this.privRequestSession.onPreConnectionStart(this.privAuthFetchEventId, this.privConnectionId);\r\n let lastStatusCode: number = 0;\r\n let lastReason: string = \"\";\r\n\r\n while (this.privRequestSession.numConnectionAttempts <= this.privRecognizerConfig.maxRetryCount) {\r\n\r\n // Get the auth information for the connection. This is a bit of overkill for the current API surface, but leaving the plumbing in place to be able to raise a developer-customer\r\n // facing event when a connection fails to let them try and provide new auth information.\r\n const authPromise = isUnAuthorized ? 
this.privAuthentication.fetchOnExpiry(this.privAuthFetchEventId) : this.privAuthentication.fetch(this.privAuthFetchEventId);\r\n const auth: AuthInfo = await authPromise;\r\n\r\n await this.privRequestSession.onAuthCompleted(false);\r\n\r\n // Create the connection\r\n const connection: IConnection = this.privConnectionFactory.create(this.privRecognizerConfig, auth, this.privConnectionId);\r\n // Attach the telemetry handlers.\r\n this.privRequestSession.listenForServiceTelemetry(connection.events);\r\n\r\n // Attach to the underlying event. No need to hold onto the detach pointers as in the event the connection goes away,\r\n // it'll stop sending events.\r\n connection.events.attach((event: ConnectionEvent): void => {\r\n