/*
* Copyright (c) 2018 THL A29 Limited, a Tencent company. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
const AbstractModel = require("../../common/abstract_model");
/**
* CreateRecTask response structure.
* @class
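*
* Minimal sketch of consuming this response (the (errMsg, response) callback style
* and the pre-built client are assumptions matching the conventions used elsewhere
* in this SDK; the fields read below are the ones defined on this class):
* @example
* client.CreateRecTask(req, function(errMsg, response) {
*     if (errMsg) {
*         console.log(errMsg);
*         return;
*     }
*     // Keep the task ID for polling DescribeTaskStatus; it is valid for 24 hours.
*     console.log(response.Data.TaskId, response.RequestId);
* });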
*/
class CreateRecTaskResponse extends AbstractModel {
constructor(){
super();
/**
* Returned result of the recording recognition request, containing the task ID required for querying the result.
** Note: The task ID is valid for 24 hours, and task IDs may be duplicated across different dates. Do not use the task ID as a unique identifier in your business system. **
* @type {Task || null}
*/
this.Data = null;
/**
* The unique request ID, generated by the server, is returned for every request (if the request fails to reach the server for network or other reasons, no RequestId is returned). RequestId is required for troubleshooting.
* @type {string || null}
*/
this.RequestId = null;
}
/**
* @private
*/
deserialize(params) {
if (!params) {
return;
}
if (params.Data) {
let obj = new Task();
obj.deserialize(params.Data)
this.Data = obj;
}
this.RequestId = 'RequestId' in params ? params.RequestId : null;
}
}
/**
* Keyword recognition result.
* @class
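*
* Sketch of reading matched keywords from a sentence's KeyWordResults array
* (sentence is assumed to be a parsed SentenceDetail taken from TaskStatus.ResultDetail):
* @example
* (sentence.KeyWordResults || []).forEach(function(kw) {
*     console.log(kw.KeyWordLibID, kw.KeyWordLibName, (kw.KeyWords || []).join(","));
* });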
*/
class KeyWordResult extends AbstractModel {
constructor(){
super();
/**
* Keyword library ID.
Note: This field may return null, indicating that no valid values can be obtained.
* @type {string || null}
*/
this.KeyWordLibID = null;
/**
* Keyword library name.
Note: This field may return null, indicating that no valid values can be obtained.
* @type {string || null}
*/
this.KeyWordLibName = null;
/**
* Matching keywords.
Note: This field may return null, indicating that no valid values can be obtained.
* @type {Array.<string> || null}
*/
this.KeyWords = null;
}
/**
* @private
*/
deserialize(params) {
if (!params) {
return;
}
this.KeyWordLibID = 'KeyWordLibID' in params ? params.KeyWordLibID : null;
this.KeyWordLibName = 'KeyWordLibName' in params ? params.KeyWordLibName : null;
this.KeyWords = 'KeyWords' in params ? params.KeyWords : null;
}
}
/**
* DescribeTaskStatus request structure.
* @class
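*
* Minimal sketch of building this request (the models export path, client call, and
* callback style are assumptions matching the rest of this SDK; taskId is the numeric
* ID returned by CreateRecTask):
* @example
* const statusReq = new models.DescribeTaskStatusRequest();
* statusReq.TaskId = taskId;
* client.DescribeTaskStatus(statusReq, function(errMsg, response) {
*     if (!errMsg) console.log(response.Data.StatusStr);
* });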
*/
class DescribeTaskStatusRequest extends AbstractModel {
constructor(){
super();
/**
* Task ID obtained from the CreateRecTask API, which is used to obtain the task status and results.
** Note: A task is valid for 24 hours. Do not query results for tasks created more than 24 hours ago. **
* @type {number || null}
*/
this.TaskId = null;
}
/**
* @private
*/
deserialize(params) {
if (!params) {
return;
}
this.TaskId = 'TaskId' in params ? params.TaskId : null;
}
}
/**
* [Response parameters for obtaining recording recognition results] (https://intl.cloud.tencent.com/document/product/1093/37822?from_cn_redirect=1#3.-.E8.BE.93.E5.87.BA.E5.8F.82.E6.95.B0)
* @class
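*
* Sketch of checking whether a polled task has finished, using the Status codes
* documented below (taskStatus is a deserialized TaskStatus instance):
* @example
* function isFinished(taskStatus) {
*     // 0: waiting, 1: in process, 2: success, 3: failed
*     return taskStatus.Status === 2 || taskStatus.Status === 3;
* }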
*/
class TaskStatus extends AbstractModel {
constructor(){
super();
/**
* Task ID. Note: The data type of TaskId is uint64.
* @type {number || null}
*/
this.TaskId = null;
/**
* Task status code. 0: waiting; 1: in process; 2: success; 3: failed.
* @type {number || null}
*/
this.Status = null;
/**
* Task status. Valid values: waiting, in process, success, and failed.
* @type {string || null}
*/
this.StatusStr = null;
/**
* Recognition result.
* @type {string || null}
*/
this.Result = null;
/**
* Failure cause.
* @type {string || null}
*/
this.ErrorMsg = null;
/**
* Recognition result details, including word time offsets for each sentence, which is generally used in subtitle generation scenarios. (This field is not empty when ResTextFormat in the recording recognition request is set to 1.)
Note: This field may return null, indicating that no valid values can be obtained.
* @type {Array.<SentenceDetail> || null}
*/
this.ResultDetail = null;
/**
* Audio duration (seconds).
Note: This field may return null, indicating that no valid values can be obtained.
* @type {number || null}
*/
this.AudioDuration = null;
}
/**
* @private
*/
deserialize(params) {
if (!params) {
return;
}
this.TaskId = 'TaskId' in params ? params.TaskId : null;
this.Status = 'Status' in params ? params.Status : null;
this.StatusStr = 'StatusStr' in params ? params.StatusStr : null;
this.Result = 'Result' in params ? params.Result : null;
this.ErrorMsg = 'ErrorMsg' in params ? params.ErrorMsg : null;
if (params.ResultDetail) {
this.ResultDetail = new Array();
for (let z in params.ResultDetail) {
let obj = new SentenceDetail();
obj.deserialize(params.ResultDetail[z]);
this.ResultDetail.push(obj);
}
}
this.AudioDuration = 'AudioDuration' in params ? params.AudioDuration : null;
}
}
/**
* Returned data of the [recording recognition] (https://intl.cloud.tencent.com/document/product/1093/37823?from_cn_redirect=1#3.-.E8.BE.93.E5.87.BA.E5.8F.82.E6.95.B0) or [asynchronous real-time audio recognition] (https://intl.cloud.tencent.com/document/product/1093/52061?from_cn_redirect=1#3.-.E8.BE.93.E5.87.BA.E5.8F.82.E6.95.B0) request.
* @class
*/
class Task extends AbstractModel {
constructor(){
super();
/**
* Task ID. This ID can be used to obtain the recognition status and results through polling. The data type of TaskId is ** uint64 **.
** Note: The task ID is valid for 24 hours, and task IDs may be duplicated across different dates. Do not use the task ID as a unique identifier in your business system. **
* @type {number || null}
*/
this.TaskId = null;
}
/**
* @private
*/
deserialize(params) {
if (!params) {
return;
}
this.TaskId = 'TaskId' in params ? params.TaskId : null;
}
}
/**
* CreateRecTask request structure.
* @class
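*
* Minimal sketch of filling and sending this request with an audio URL (SourceType = 0).
* The client and credential wiring is an assumption based on the conventions used
* elsewhere in this SDK (asr.v20190614 entry point, (errMsg, response) callbacks);
* the field names and values below are the ones documented on this class.
* @example
* const tencentcloud = require("tencentcloud-sdk-nodejs-intl-en");
* const AsrClient = tencentcloud.asr.v20190614.Client;    // assumed entry point
* const models = tencentcloud.asr.v20190614.Models;       // assumed entry point
* const Credential = tencentcloud.common.Credential;      // assumed entry point
*
* const client = new AsrClient(new Credential("SecretId", "SecretKey"), "ap-shanghai");
*
* const req = new models.CreateRecTaskRequest();
* req.EngineModelType = "16k_zh_large";  // engine (large model version) for Mandarin, dialects, and English
* req.ChannelNum = 1;                    // 16k engines support mono only
* req.ResTextFormat = 3;                 // word-level details segmented by punctuation (subtitle scenarios)
* req.SourceType = 0;                    // 0: audio URL; 1: Base64 audio passed in Data
* req.Url = "https://example.com/audio.wav";
* // Optional temporary term list, "term|weight" entries separated by commas.
* req.HotwordList = "Tencent Cloud|10,ASR|11";
*
* client.CreateRecTask(req, function(errMsg, response) {
*     if (errMsg) {
*         console.log(errMsg);
*         return;
*     }
*     console.log(response.Data.TaskId);  // poll DescribeTaskStatus with this task ID
* });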
*/
class CreateRecTaskRequest extends AbstractModel {
constructor(){
super();
/**
* Engine model type.
Each recognition engine adopts a specific billing plan. Engines marked with "large model version" adopt the large model billing plan. For product billing instructions, [click here] (https://intl.cloud.tencent.com/document/product/1093/35686?from_cn_redirect=1).
Note: If you need to recognize telecommunication audio, you can use one of the 16k engines described below. However, ** the 16k engines are not trained on telecommunication audio, so the recognition results cannot be guaranteed. Check whether the recognition results meet your needs. **
Engines for general scenarios:
** Note: Use 16k engines for scenarios other than telecommunication. **
** 16k_zh_large: ** Engine (large model version) for Mandarin, Chinese dialects, and English. This engine supports recognizing audio in Chinese, English, and [various Chinese dialects] (https://intl.cloud.tencent.com/document/product/1093/35682?from_cn_redirect=1). It has a large number of parameters, enhanced performance, and greatly improved recognition accuracy for low-quality audio with loud noise, too much echo, low voice volume, or faint voices. [Click here] (https://console.cloud.tencent.com/asr/demonstrate) to compare the recognition performance of the 16k_zh engine and this one.
** 16k_multi_lang: ** Engine (large model version) for multiple languages. This engine supports recognizing audio in English, Japanese, Korean, Arabic, Filipino, French, Hindi, Indonesian, Malay, Portuguese, Spanish, Thai, Turkish, Vietnamese, and German (sentence-level or paragraph-level).
** 16k_zh-PY: ** Engine for Chinese, English, and Cantonese. The engine supports recognizing audio in Mandarin, English, and Cantonese at the same time.
** 16k_ms: ** Engine for Malay.
** 16k_id: ** Engine for Indonesian.
** 16k_th: ** Engine for Thai.
* @type {string || null}
*/
this.EngineModelType = null;
/**
* Number of recognition channels.
1: Mono. (16k engines only support mono. ** Do not ** set it to stereo.)
2: Stereo. (Stereo is supported only for 8k engines, and the two channels should correspond to the respective communication parties.)
Note:
16k engines: Only support mono. ** ChannelNum should be set to 1 **.
8k engines: Support both mono and stereo. ** It is recommended to set ChannelNum to 2 (indicating stereo) **. Stereo can physically distinguish speakers to avoid recognition mistakes caused by overlapping speech. It can provide the best speaker separation and recognition effects. Once stereo is set, the speakers are automatically separated. ** You do not need to enable the speaker separation feature **. You can use the default values for related parameters (** SpeakerDiarization and SpeakerNumber **). For speakerId in the returned ResultDetail, the value 0 represents the left channel, and the value 1 represents the right channel.
* @type {number || null}
*/
this.ChannelNum = null;
/**
* Format of the returned recognition result.
0: The basic recognition result (containing only valid voice timestamps but no word-level [detailed recognition result] (https://intl.cloud.tencent.com/document/api/1093/37824?from_cn_redirect=1#SentenceDetail)).
1: The basic recognition result and word-level [detailed recognition result] (https://intl.cloud.tencent.com/document/api/1093/37824?from_cn_redirect=1#SentenceDetail) (containing word-level timestamps and speech speed value but ** no punctuation **).
2: The basic recognition result and word-level [detailed recognition result] (https://intl.cloud.tencent.com/document/api/1093/37824?from_cn_redirect=1#SentenceDetail) (containing word-level timestamps, speech speed value, and ** punctuation **).
3: The basic recognition result and word-level [detailed recognition result] (https://intl.cloud.tencent.com/document/api/1093/37824?from_cn_redirect=1#SentenceDetail) (containing word-level timestamps, speech speed value, and ** punctuation **). The recognition results are segmented by punctuation. ** This format applies to subtitle scenarios **.
4: ** [Value-added paid feature] ** The basic recognition result and word-level [detailed recognition result] (https://intl.cloud.tencent.com/document/api/1093/37824?from_cn_redirect=1#SentenceDetail) (containing word-level timestamps, speech speed value, and ** punctuation **). The recognition results are segmented by NLP semantics. ** This format applies to scenarios such as meeting and court record transcription ** and is supported only for 8k_zh and 16k_zh engines.
5: ** [Value-added paid feature] ** Basic recognition result and word-level [detailed recognition result] (https://intl.cloud.tencent.com/document/api/1093/37824?from_cn_redirect=1#SentenceDetail) (containing word-level timestamps, speech speed value, and ** punctuation **). The oral-to-written transcription result is also output, which has excluded modal particles and consecutive identical words, optimized expressions, and corrected speech mistakes. ** This format applies to scenarios of generating minutes for online and offline meetings** and is supported only for 8k_zh and 16k_zh engines.
Notes:
If this parameter is set to 4, make sure that a [semantics-based segmentation resource package] (https://intl.cloud.tencent.com/document/product/1093/35686?from_cn_redirect=1#97ae4aa0-29a0-4066-9f07-ccaf8856a16b) is purchased for your account or that your account has enabled post-payment. ** If post-payment is enabled and this parameter is set to 4, [automatic billing] (https://intl.cloud.tencent.com/document/product/1093/35686?from_cn_redirect=1#d912167d-ffd5-41a9-8b1c-2e89845a6852) will apply **.
If this parameter is set to 5, make sure that an [oral-to-written resource package] (https://intl.cloud.tencent.com/document/product/1093/35686?from_cn_redirect=1#97ae4aa0-29a0-4066-9f07-ccaf8856a16b) is purchased for your account or that your account has enabled post-payment. ** If post-payment is enabled and this parameter is set to 5, [automatic billing] (https://intl.cloud.tencent.com/document/product/1093/35686?from_cn_redirect=1#d912167d-ffd5-41a9-8b1c-2e89845a6852) will apply **.
* @type {number || null}
*/
this.ResTextFormat = null;
/**
* Audio source.
0: Audio URL.
1: Local audio file (body of the POST request).
* @type {number || null}
*/
this.SourceType = null;
/**
* Audio file Base64 code.
** This parameter is required if SourceType is set to 1. Otherwise, it can be left blank. **
Note: The audio data size cannot exceed 5 MB.
* @type {string || null}
*/
this.Data = null;
/**
* Data length (before Base64 encoding).
* @type {number || null}
*/
this.DataLen = null;
/**
* Audio URL. (The audio should be downloadable via a public network browser.)
** This parameter is required if SourceType is set to 0. Otherwise, it can be left blank. **
Notes:
1. Make sure that the total audio duration of recording files does not exceed 5 hours. Otherwise, recognition may fail.
2. Make sure the audio file can be downloaded reliably to avoid download failures.
* @type {string || null}
*/
this.Url = null;
/**
* Callback URL
User-defined service URL for receiving recognition results.
For the callback format and content, see [Callback Description] (https://intl.cloud.tencent.com/document/product/1093/52632?from_cn_redirect=1).
Notes:
- If you use the polling method to obtain recognition results, this parameter is not required.
- It is recommended to include your business ID and other information in the callback URL for handling business logic.
* @type {string || null}
*/
this.CallbackUrl = null;
/**
* Whether to enable speaker separation.
0: Disable.
1: Enable. (This value is supported only for the following engines: 8k_zh, 16k_zh, 16k_ms, 16k_en, 16k_id, 16k_zh_large, and 16k_zh_dialect. ChannelNum should be set to 1.)
The default value is 0.
Note:
If an 8k engine is used and ChannelNum is set to 2 (stereo), use the default values for corresponding parameters as stated in the ** ChannelNum ** parameter description.
* @type {number || null}
*/
this.SpeakerDiarization = null;
/**
* Number of speakers to be separated.
** Speaker separation must be enabled. Otherwise, this parameter does not take effect. ** Value range: 0-10.
0: Automatic separation. (Up to 20 speakers can be separated.)
1-10: Specify the number of speakers.
The default value is 0.
* @type {number || null}
*/
this.SpeakerNumber = null;
/**
* This service is not available.
* @type {string || null}
*/
this.HotwordId = null;
/**
* This service is not available.
* @type {number || null}
*/
this.ReinforceHotword = null;
/**
* This service is not available.
* @type {string || null}
*/
this.CustomizationId = null;
/**
* This service is not available.
* @type {number || null}
*/
this.EmotionRecognition = null;
/**
* Whether to output the emotional energy value.
The emotional energy value is the sound volume in dB divided by 10. Value range: [1,10]. The higher the value, the stronger the emotion.
0: Disable.
1: Enable.
The default value is 0.
* @type {number || null}
*/
this.EmotionalEnergy = null;
/**
* Intelligent conversion into Arabic numerals (supported only for engines for recognizing audio in Mandarin).
0: Do not convert, but directly output Chinese numerals.
1: Intelligently convert into Arabic numerals based on the scenario.
3: Enable conversion for math-related numbers.
The default value is 1.
* @type {number || null}
*/
this.ConvertNumMode = null;
/**
* Dirty word filtering (supported only for engines for recognizing audio in Mandarin).
0: Do not filter out dirty words.
1: Filter out dirty words.
2: Replace dirty words with *.
The default value is 0.
* @type {number || null}
*/
this.FilterDirty = null;
/**
* Punctuation filtering (supported only for engines for recognizing audio in Mandarin).
0: Do not filter out punctuation.
1: Filter out sentence-ending punctuation.
2: Filter out all punctuation.
The default value is 0.
* @type {number || null}
*/
this.FilterPunc = null;
/**
* Modal particle filtering (supported only for engines for recognizing audio in Mandarin).
0: Do not filter out modal particles.
1: Filter out specified modal particles.
2: Filter out all modal particles.
The default value is 0.
* @type {number || null}
*/
this.FilterModal = null;
/**
* The maximum number of characters per line (supported only for engines for recognizing audio in Mandarin). A punctuation mark is added if this limit is reached.
** This parameter can control the maximum number of characters per line, which applies to subtitle generation scenarios. ** Value range: [6,40].
0: Disable this feature.
The default value is 0.
Note: To enable this feature, ResTextFormat should be set to 3. The recognition result can be obtained from FinalSentence by parsing the list in the returned ResultDetail.
* @type {number || null}
*/
this.SentenceMaxLength = null;
/**
* Additional parameter. ** (This parameter is meaningless. Ignore it.) **
* @type {string || null}
*/
this.Extra = null;
/**
* Temporary term list. This parameter is used to improve the recognition accuracy.
- Restrictions for individual terms: The format is "term|weight". Each term can contain no more than 30 characters (or 10 Chinese characters). The weight can be in the range of [1-11] or 100. For example, "Tencent Cloud|5" or "ASR|11".
- Restrictions for the temporary term list: Multiple terms are separated by commas. The list can contain up to 128 terms. For example, "Tencent Cloud|10, Audio Recognition|5, ASR|11".
- Difference between hotword_id (term list) and hotword_list (temporary term list):
- hotword_id: Term list. You need to create a term list in the console or by using the API first and obtain the term list ID.
- hotword_list: Temporary term list. You can directly enter the ID of the temporary term list each time you initiate a request. The temporary term list is not retained on the cloud. This parameter applies to users with a massive number of terms.
Notes:
- If both hotword_id and hotword_list are specified, hotword_list takes precedence.
- When the weight of a term is set to 11, this term becomes a super term. It is recommended that the weight is set to 11 only for critical and necessary terms. The overall accuracy will be affected if the weight is set to 11 for too many terms.
- When the weight of a term is set to 100, the term enhancement feature is enabled to replace homophones of this term. (This feature is supported only for 8k_zh and 16k_zh engines.) For example, if you configure "mizhi 1|100", the recognized homophone "mizhi 2" will be forcibly replaced with "mizhi 1". It is recommended that you enable this feature based on actual needs and set the weight to 100 only for critical and necessary terms. The overall accuracy will be affected if the weight is set to 100 for too many terms.
* @type {string || null}
*/
this.HotwordList = null;
/**
* List of keyword IDs for recognition. This parameter is left blank by default, indicating that no keyword is recognized. You can enter up to 10 IDs.
* @type {Array.<string> || null}
*/
this.KeyWordLibIdList = null;
}
/**
* @private
*/
deserialize(params) {
if (!params) {
return;
}
this.EngineModelType = 'EngineModelType' in params ? params.EngineModelType : null;
this.ChannelNum = 'ChannelNum' in params ? params.ChannelNum : null;
this.ResTextFormat = 'ResTextFormat' in params ? params.ResTextFormat : null;
this.SourceType = 'SourceType' in params ? params.SourceType : null;
this.Data = 'Data' in params ? params.Data : null;
this.DataLen = 'DataLen' in params ? params.DataLen : null;
this.Url = 'Url' in params ? params.Url : null;
this.CallbackUrl = 'CallbackUrl' in params ? params.CallbackUrl : null;
this.SpeakerDiarization = 'SpeakerDiarization' in params ? params.SpeakerDiarization : null;
this.SpeakerNumber = 'SpeakerNumber' in params ? params.SpeakerNumber : null;
this.HotwordId = 'HotwordId' in params ? params.HotwordId : null;
this.ReinforceHotword = 'ReinforceHotword' in params ? params.ReinforceHotword : null;
this.CustomizationId = 'CustomizationId' in params ? params.CustomizationId : null;
this.EmotionRecognition = 'EmotionRecognition' in params ? params.EmotionRecognition : null;
this.EmotionalEnergy = 'EmotionalEnergy' in params ? params.EmotionalEnergy : null;
this.ConvertNumMode = 'ConvertNumMode' in params ? params.ConvertNumMode : null;
this.FilterDirty = 'FilterDirty' in params ? params.FilterDirty : null;
this.FilterPunc = 'FilterPunc' in params ? params.FilterPunc : null;
this.FilterModal = 'FilterModal' in params ? params.FilterModal : null;
this.SentenceMaxLength = 'SentenceMaxLength' in params ? params.SentenceMaxLength : null;
this.Extra = 'Extra' in params ? params.Extra : null;
this.HotwordList = 'HotwordList' in params ? params.HotwordList : null;
this.KeyWordLibIdList = 'KeyWordLibIdList' in params ? params.KeyWordLibIdList : null;
}
}
/**
* DescribeTaskStatus response structure.
* @class
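*
* Minimal polling sketch. The 3-second retry interval is an arbitrary choice, and the
* client/models wiring is assumed from the rest of this SDK; the Status codes and
* fields are the ones defined on TaskStatus in this file.
* @example
* function poll(taskId) {
*     const statusReq = new models.DescribeTaskStatusRequest();
*     statusReq.TaskId = taskId;
*     client.DescribeTaskStatus(statusReq, function(errMsg, response) {
*         if (errMsg) {
*             console.log(errMsg);
*             return;
*         }
*         if (response.Data.Status === 2) {           // success
*             console.log(response.Data.Result);
*         } else if (response.Data.Status === 3) {    // failed
*             console.log(response.Data.ErrorMsg);
*         } else {                                    // 0: waiting, 1: in process
*             setTimeout(function() { poll(taskId); }, 3000);
*         }
*     });
* }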
*/
class DescribeTaskStatusResponse extends AbstractModel {
constructor(){
super();
/**
* Returned result of the recording recognition request.
* @type {TaskStatus || null}
*/
this.Data = null;
/**
* The unique request ID, generated by the server, is returned for every request (if the request fails to reach the server for network or other reasons, no RequestId is returned). RequestId is required for troubleshooting.
* @type {string || null}
*/
this.RequestId = null;
}
/**
* @private
*/
deserialize(params) {
if (!params) {
return;
}
if (params.Data) {
let obj = new TaskStatus();
obj.deserialize(params.Data)
this.Data = obj;
}
this.RequestId = 'RequestId' in params ? params.RequestId : null;
}
}
/**
* Detailed recognition result of a sentence, including the time offset of individual words. This parameter generally applies to subtitle generation scenarios.
* @class
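*
* Sketch of turning parsed ResultDetail entries into simple subtitle lines
* (requires a ResTextFormat value that returns word-level details so that
* ResultDetail is populated; field names are the ones defined below):
* @example
* (taskStatus.ResultDetail || []).forEach(function(sentence) {
*     console.log(sentence.StartMs + "ms - " + sentence.EndMs + "ms  " + sentence.FinalSentence);
* });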
*/
class SentenceDetail extends AbstractModel {
constructor(){
super();
/**
* Final recognition result of a sentence.
Note: This field may return null, indicating that no valid values can be obtained.
* @type {string || null}
*/
this.FinalSentence = null;
/**
* Intermediate recognition result of a sentence. The sentence is split into multiple phrases by spaces.
Note: This field may return null, indicating that no valid values can be obtained.
* @type {string || null}
*/
this.SliceSentence = null;
/**
* Oral-to-written transcription result. This parameter has a value only if the corresponding feature is enabled.
Note: This field may return null, indicating that no valid values can be obtained.
* @type {string || null}
*/
this.WrittenText = null;
/**
* Start time of a sentence (ms).
Note: This field may return null, indicating that no valid values can be obtained.
* @type {number || null}
*/
this.StartMs = null;
/**
* End time of a sentence (ms).
Note: This field may return null, indicating that no valid values can be obtained.
* @type {number || null}
*/
this.EndMs = null;
/**
* Number of words in a sentence.
Note: This field may return null, indicating that no valid values can be obtained.
* @type {number || null}
*/
this.WordsNum = null;
/**
* Word details of a sentence.
Note: This field may return null, indicating that no valid values can be obtained.
* @type {Array.<SentenceWords> || null}
*/
this.Words = null;
/**
* Speech speed of a sentence. Unit: Number of words per second.
Note: This field may return null, indicating that no valid values can be obtained.
* @type {number || null}
*/
this.SpeechSpeed = null;
/**
* Channel or speaker ID. (Speakers or channels can be distinguished only if speaker_diarization is specified or ChannelNum is set to 2 (stereo) in the request.)
In mono mode, different values represent different speakers. In stereo mode with an 8k engine, 0 represents the left channel and 1 represents the right channel.
Note: This field may return null, indicating that no valid values can be obtained.
* @type {number || null}
*/
this.SpeakerId = null;
/**
* Emotional energy value. This value is the result of dividing the sound volume in dB by 10. Value range: [1,10]. The higher the value, the stronger the emotion.
Note: This field may return null, indicating that no valid values can be obtained.
* @type {number || null}
*/
this.EmotionalEnergy = null;
/**
* Silent duration between the current sentence and the previous sentence.
Note: This field may return null, indicating that no valid values can be obtained.
* @type {number || null}
*/
this.SilenceTime = null;
/**
* Emotion type. (This parameter may be left blank in two scenarios: 1. No corresponding resource package exists; 2. The emotion is not recognized because it is not strong enough, which is related to the emotional energy.)
Note: This field may return null, indicating that no valid values can be obtained.
* @type {Array.<string> || null}
*/
this.EmotionType = null;
/**
* List of recognized keywords.
Note: This field may return null, indicating that no valid values can be obtained.
* @type {Array.<KeyWordResult> || null}
*/
this.KeyWordResults = null;
}
/**
* @private
*/
deserialize(params) {
if (!params) {
return;
}
this.FinalSentence = 'FinalSentence' in params ? params.FinalSentence : null;
this.SliceSentence = 'SliceSentence' in params ? params.SliceSentence : null;
this.WrittenText = 'WrittenText' in params ? params.WrittenText : null;
this.StartMs = 'StartMs' in params ? params.StartMs : null;
this.EndMs = 'EndMs' in params ? params.EndMs : null;
this.WordsNum = 'WordsNum' in params ? params.WordsNum : null;
if (params.Words) {
this.Words = new Array();
for (let z in params.Words) {
let obj = new SentenceWords();
obj.deserialize(params.Words[z]);
this.Words.push(obj);
}
}
this.SpeechSpeed = 'SpeechSpeed' in params ? params.SpeechSpeed : null;
this.SpeakerId = 'SpeakerId' in params ? params.SpeakerId : null;
this.EmotionalEnergy = 'EmotionalEnergy' in params ? params.EmotionalEnergy : null;
this.SilenceTime = 'SilenceTime' in params ? params.SilenceTime : null;
this.EmotionType = 'EmotionType' in params ? params.EmotionType : null;
if (params.KeyWordResults) {
this.KeyWordResults = new Array();
for (let z in params.KeyWordResults) {
let obj = new KeyWordResult();
obj.deserialize(params.KeyWordResults[z]);
this.KeyWordResults.push(obj);
}
}
}
}
/**
* Word text in the recognition result and the corresponding time offset.
* @class
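*
* Sketch of computing absolute word timestamps from a parsed SentenceDetail; the
* offsets are documented as relative to the sentence, so they are added to the
* sentence's StartMs here:
* @example
* (sentence.Words || []).forEach(function(w) {
*     console.log(w.Word, sentence.StartMs + w.OffsetStartMs, sentence.StartMs + w.OffsetEndMs);
* });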
*/
class SentenceWords extends AbstractModel {
constructor(){
super();
/**
* Word text.
Note: This field may return null, indicating that no valid values can be obtained.
* @type {string || null}
*/
this.Word = null;
/**
* Start time offset in the sentence.
Note: This field may return null, indicating that no valid values can be obtained.
* @type {number || null}
*/
this.OffsetStartMs = null;
/**
* End time offset in the sentence.
Note: This field may return null, indicating that no valid values can be obtained.
* @type {number || null}
*/
this.OffsetEndMs = null;
}
/**
* @private
*/
deserialize(params) {
if (!params) {
return;
}
this.Word = 'Word' in params ? params.Word : null;
this.OffsetStartMs = 'OffsetStartMs' in params ? params.OffsetStartMs : null;
this.OffsetEndMs = 'OffsetEndMs' in params ? params.OffsetEndMs : null;
}
}
module.exports = {
CreateRecTaskResponse: CreateRecTaskResponse,
KeyWordResult: KeyWordResult,
DescribeTaskStatusRequest: DescribeTaskStatusRequest,
TaskStatus: TaskStatus,
Task: Task,
CreateRecTaskRequest: CreateRecTaskRequest,
DescribeTaskStatusResponse: DescribeTaskStatusResponse,
SentenceDetail: SentenceDetail,
SentenceWords: SentenceWords,
}