microsoft-cognitiveservices-speech-sdk
Microsoft Cognitive Services Speech SDK for JavaScript: ResultReason.js
"use strict";
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
Object.defineProperty(exports, "__esModule", { value: true });
exports.ResultReason = void 0;
/**
* Defines the possible reasons a recognition result might be generated.
* @class ResultReason
*/
var ResultReason;
(function (ResultReason) {
/**
* Indicates speech could not be recognized. More details
* can be found in the NoMatchDetails object.
* @member ResultReason.NoMatch
*/
ResultReason[ResultReason["NoMatch"] = 0] = "NoMatch";
/**
* Indicates that the recognition was canceled. More details
* can be found using the CancellationDetails object.
* @member ResultReason.Canceled
*/
ResultReason[ResultReason["Canceled"] = 1] = "Canceled";
/**
* Indicates the speech result contains hypothesis text.
* @member ResultReason.RecognizingSpeech
*/
ResultReason[ResultReason["RecognizingSpeech"] = 2] = "RecognizingSpeech";
/**
* Indicates the speech result contains final text that has been recognized.
* Speech Recognition is now complete for this phrase.
* @member ResultReason.RecognizedSpeech
*/
ResultReason[ResultReason["RecognizedSpeech"] = 3] = "RecognizedSpeech";
/**
* Indicates the speech result contains a finalized acceptance of a provided keyword.
* Speech recognition will continue unless otherwise configured.
* @member ResultReason.RecognizedKeyword
*/
ResultReason[ResultReason["RecognizedKeyword"] = 4] = "RecognizedKeyword";
/**
* Indicates the intent result contains hypothesis text and intent.
* @member ResultReason.RecognizingIntent
*/
ResultReason[ResultReason["RecognizingIntent"] = 5] = "RecognizingIntent";
/**
* Indicates the intent result contains final text and intent.
* Speech Recognition and Intent determination are now complete for this phrase.
* @member ResultReason.RecognizedIntent
*/
ResultReason[ResultReason["RecognizedIntent"] = 6] = "RecognizedIntent";
/**
* Indicates the translation result contains hypothesis text and its translation(s).
* @member ResultReason.TranslatingSpeech
*/
ResultReason[ResultReason["TranslatingSpeech"] = 7] = "TranslatingSpeech";
/**
* Indicates the translation result contains final text and corresponding translation(s).
* Speech Recognition and Translation are now complete for this phrase.
* @member ResultReason.TranslatedSpeech
*/
ResultReason[ResultReason["TranslatedSpeech"] = 8] = "TranslatedSpeech";
/**
* Indicates the synthesized audio result contains a non-zero amount of audio data.
* @member ResultReason.SynthesizingAudio
*/
ResultReason[ResultReason["SynthesizingAudio"] = 9] = "SynthesizingAudio";
/**
* Indicates the synthesized audio is now complete for this phrase.
* @member ResultReason.SynthesizingAudioCompleted
*/
ResultReason[ResultReason["SynthesizingAudioCompleted"] = 10] = "SynthesizingAudioCompleted";
/**
* Indicates that speech synthesis has now started.
* @member ResultReason.SynthesizingAudioStarted
*/
ResultReason[ResultReason["SynthesizingAudioStarted"] = 11] = "SynthesizingAudioStarted";
/**
* Indicates the voice profile is being enrolled and customers need to send more audio to create a voice profile.
* @member ResultReason.EnrollingVoiceProfile
*/
ResultReason[ResultReason["EnrollingVoiceProfile"] = 12] = "EnrollingVoiceProfile";
/**
* Indicates the voice profile has been enrolled.
* @member ResultReason.EnrolledVoiceProfile
*/
ResultReason[ResultReason["EnrolledVoiceProfile"] = 13] = "EnrolledVoiceProfile";
/**
* Indicates successful identification of some speakers.
* @member ResultReason.RecognizedSpeakers
*/
ResultReason[ResultReason["RecognizedSpeakers"] = 14] = "RecognizedSpeakers";
/**
* Indicates that one speaker was successfully verified.
* @member ResultReason.RecognizedSpeaker
*/
ResultReason[ResultReason["RecognizedSpeaker"] = 15] = "RecognizedSpeaker";
/**
* Indicates a voice profile has been reset successfully.
* @member ResultReason.ResetVoiceProfile
*/
ResultReason[ResultReason["ResetVoiceProfile"] = 16] = "ResetVoiceProfile";
/**
* Indicates a voice profile has been deleted successfully.
* @member ResultReason.DeletedVoiceProfile
*/
ResultReason[ResultReason["DeletedVoiceProfile"] = 17] = "DeletedVoiceProfile";
/**
* Indicates the synthesis voices list has been successfully retrieved.
* @member ResultReason.VoicesListRetrieved
*/
ResultReason[ResultReason["VoicesListRetrieved"] = 18] = "VoicesListRetrieved";
/**
* Indicates the transcription result contains hypothesis text and its translation(s) for
* other participants in the conversation.
* @member ResultReason.TranslatingParticipantSpeech
*/
ResultReason[ResultReason["TranslatingParticipantSpeech"] = 19] = "TranslatingParticipantSpeech";
/**
* Indicates the transcription result contains final text and corresponding translation(s)
* for other participants in the conversation. Speech Recognition and Translation are now
* complete for this phrase.
* @member ResultReason.TranslatedParticipantSpeech
*/
ResultReason[ResultReason["TranslatedParticipantSpeech"] = 20] = "TranslatedParticipantSpeech";
/**
* Indicates the transcription result contains the instant message and corresponding
* translation(s).
* @member ResultReason.TranslatedInstantMessage
*/
ResultReason[ResultReason["TranslatedInstantMessage"] = 21] = "TranslatedInstantMessage";
/**
* Indicates the transcription result contains the instant message for other participants
* in the conversation and corresponding translation(s).
* @member ResultReason.TranslatedParticipantInstantMessage
*/
ResultReason[ResultReason["TranslatedParticipantInstantMessage"] = 22] = "TranslatedParticipantInstantMessage";
})(ResultReason = exports.ResultReason || (exports.ResultReason = {}));
//# sourceMappingURL=ResultReason.js.map
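For orientation, here is a minimal usage sketch showing how these reason codes are typically inspected on a single-shot recognition result. It is illustrative only: the SPEECH_KEY and SPEECH_REGION environment variables are assumed placeholders for a real subscription key and region, and the NoMatchDetails and CancellationDetails helpers are the ones referenced in the comments above.

const sdk = require("microsoft-cognitiveservices-speech-sdk");

// Illustrative credentials; substitute a real key and region.
const speechConfig = sdk.SpeechConfig.fromSubscription(process.env.SPEECH_KEY, process.env.SPEECH_REGION);
const audioConfig = sdk.AudioConfig.fromDefaultMicrophoneInput();
const recognizer = new sdk.SpeechRecognizer(speechConfig, audioConfig);

recognizer.recognizeOnceAsync((result) => {
    switch (result.reason) {
        case sdk.ResultReason.RecognizedSpeech:
            // Final text has been recognized for this phrase.
            console.log(`Recognized: ${result.text}`);
            break;
        case sdk.ResultReason.NoMatch:
            // Speech could not be recognized; NoMatchDetails carries the specific reason.
            console.log(`No match: ${sdk.NoMatchDetails.fromResult(result).reason}`);
            break;
        case sdk.ResultReason.Canceled: {
            // Recognition was canceled; CancellationDetails carries the reason and any error text.
            const cancellation = sdk.CancellationDetails.fromResult(result);
            console.log(`Canceled: ${cancellation.reason} ${cancellation.errorDetails}`);
            break;
        }
    }
    recognizer.close();
}, (err) => {
    console.error(err);
    recognizer.close();
});

During continuous recognition, intermediate RecognizingSpeech results are raised through the recognizer's recognizing event and final RecognizedSpeech results through its recognized event.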
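The translation reasons follow the same pattern. A sketch with a TranslationRecognizer, reusing the sdk import above and assuming German ("de") as the target language:

const translationConfig = sdk.SpeechTranslationConfig.fromSubscription(process.env.SPEECH_KEY, process.env.SPEECH_REGION);
translationConfig.speechRecognitionLanguage = "en-US";
translationConfig.addTargetLanguage("de");
const translator = new sdk.TranslationRecognizer(translationConfig, sdk.AudioConfig.fromDefaultMicrophoneInput());

translator.recognizeOnceAsync((result) => {
    if (result.reason === sdk.ResultReason.TranslatedSpeech) {
        // Final recognized text plus its translation(s) for this phrase.
        console.log(`"${result.text}" -> "${result.translations.get("de")}"`);
    }
    translator.close();
}, (err) => {
    console.error(err);
    translator.close();
});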
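Likewise for the synthesis reasons: a SpeechSynthesizer sketch, reusing speechConfig from the first example, that checks for SynthesizingAudioCompleted.

const synthesizer = new sdk.SpeechSynthesizer(speechConfig);
synthesizer.speakTextAsync("Hello, world.", (result) => {
    if (result.reason === sdk.ResultReason.SynthesizingAudioCompleted) {
        // result.audioData holds the synthesized audio as an ArrayBuffer.
        console.log(`Synthesized ${result.audioData.byteLength} bytes of audio.`);
    }
    synthesizer.close();
}, (err) => {
    console.error(err);
    synthesizer.close();
});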