
microsoft-cognitiveservices-speech-sdk

LanguageIdContext.d.ts.map (source map for LanguageIdContext.d.ts, 3.25 kB)
{"version":3,"sources":["src/common.speech/ServiceMessages/LanguageId/LanguageIdContext.ts"],"names":[],"mappings":"AAKA,OAAO,EAAE,SAAS,EAAE,MAAM,aAAa,CAAC;AACxC,OAAO,EAAE,SAAS,EAAE,MAAM,aAAa,CAAC;AACxC,OAAO,EAAE,gBAAgB,EAAE,MAAM,oBAAoB,CAAC;AAEtD;;;;GAIG;AACH,oBAAY,uBAAuB;IAC/B,kBAAkB,uBAAuB;IACzC,gBAAgB,qBAAqB;IACrC,cAAc,mBAAmB;CACpC;AAED;;;GAGG;AACH,oBAAY,2BAA2B;IACnC;;OAEG;IACH,IAAI,SAAS;IAEb;;OAEG;IACH,iBAAiB,sBAAsB;IAEvC;;OAEG;IACH,kBAAkB,uBAAuB;CAC5C;AAED;;GAEG;AACH,MAAM,WAAW,iBAAiB;IAC9B;;OAEG;IACH,SAAS,EAAE,MAAM,EAAE,CAAC;IAEpB;;OAEG;IACH,SAAS,CAAC,EAAE,SAAS,CAAC;IAEtB;;OAEG;IACH,IAAI,CAAC,EAAE,uBAAuB,CAAC;IAE/B;;OAEG;IACH,SAAS,CAAC,EAAE,SAAS,CAAC;IAEtB;;OAEG;IACH,MAAM,CAAC,EAAE,gBAAgB,CAAC;IAE1B;;OAEG;IACH,gBAAgB,CAAC,EAAE,MAAM,CAAC;IAE1B;;OAEG;IACH,iBAAiB,CAAC,EAAE,MAAM,CAAC;IAE3B;;OAEG;IACH,QAAQ,CAAC,EAAE,2BAA2B,CAAC;CAC1C","file":"LanguageIdContext.d.ts","sourcesContent":["//\r\n// Copyright (c) Microsoft. All rights reserved.\r\n// Licensed under the MIT license. See LICENSE.md file in the project root for full license information.\r\n//\r\n\r\nimport { OnSuccess } from \"./OnSuccess\";\r\nimport { OnUnknown } from \"./OnUnknown\";\r\nimport { LanguageIdOutput } from \"./LanguageIdOutput\";\r\n\r\n/**\r\n * The enum that represents which mode will language detection take place\r\n * There is only detectAtAudioStart mode for now as language detection models are not trained for different modes\r\n * This enum can be extended in future to support different modes\r\n */\r\nexport enum LanguageIdDetectionMode {\r\n DetectAtAudioStart = \"DetectAtAudioStart\",\r\n DetectContinuous = \"DetectContinuous\",\r\n DetectSegments = \"DetectSegments\"\r\n}\r\n\r\n/**\r\n * The language id detection mode, setting this will load the detection setting of MaxAudioDuration and MaxSpeechDuration\r\n * If the maxAudioDuration and maxSpeechDuration is set in the speech.context, then this detection mode will be ignored\r\n */\r\nexport enum LanguageIdDetectionPriority {\r\n /**\r\n * default, Service decides the best mode to use.\r\n */\r\n Auto = \"Auto\",\r\n\r\n /**\r\n * Offers lower latency via a trade-off of accuracy.\r\n */\r\n PrioritizeLatency = \"PrioritizeLatency\",\r\n\r\n /**\r\n * Offers higher accuracy via a trade-off of latency.\r\n */\r\n PrioritizeAccuracy = \"PrioritizeAccuracy\"\r\n}\r\n\r\n/**\r\n * The language id context\r\n */\r\nexport interface LanguageIdContext {\r\n /**\r\n * The candidate languages for speaker language detection.\r\n */\r\n languages: string[];\r\n\r\n /**\r\n * The on success action.\r\n */\r\n onSuccess?: OnSuccess;\r\n\r\n /**\r\n * The language detection mode.\r\n */\r\n mode?: LanguageIdDetectionMode;\r\n\r\n /**\r\n * The fallback language.\r\n */\r\n onUnknown?: OnUnknown;\r\n\r\n /**\r\n * The output\r\n */\r\n output?: LanguageIdOutput;\r\n\r\n /**\r\n * The max audio duration\r\n */\r\n maxAudioDuration?: number;\r\n\r\n /**\r\n * The max speech duration\r\n */\r\n maxSpeechDuration?: number;\r\n\r\n /**\r\n * The priority.\r\n */\r\n priority?: LanguageIdDetectionPriority;\r\n}\r\n"]}