aws-sdk
import {Request} from '../lib/request';
import {Response} from '../lib/response';
import {AWSError} from '../lib/error';
import {Service} from '../lib/service';
import {ServiceConfigurationOptions} from '../lib/service';
import {ConfigBase as Config} from '../lib/config-base';
interface Blob {}
declare class ChimeSDKMediaPipelines extends Service {
  /**
   * Constructs a service object. This object has one method for each API operation.
   */
  constructor(options?: ChimeSDKMediaPipelines.Types.ClientConfiguration)
  config: Config & ChimeSDKMediaPipelines.Types.ClientConfiguration;
  /**
   * Creates a media pipeline.
   */
  createMediaCapturePipeline(params: ChimeSDKMediaPipelines.Types.CreateMediaCapturePipelineRequest, callback?: (err: AWSError, data: ChimeSDKMediaPipelines.Types.CreateMediaCapturePipelineResponse) => void): Request<ChimeSDKMediaPipelines.Types.CreateMediaCapturePipelineResponse, AWSError>;
  /**
   * Creates a media pipeline.
   */
  createMediaCapturePipeline(callback?: (err: AWSError, data: ChimeSDKMediaPipelines.Types.CreateMediaCapturePipelineResponse) => void): Request<ChimeSDKMediaPipelines.Types.CreateMediaCapturePipelineResponse, AWSError>;
  /**
   * Creates a media concatenation pipeline.
   */
  createMediaConcatenationPipeline(params: ChimeSDKMediaPipelines.Types.CreateMediaConcatenationPipelineRequest, callback?: (err: AWSError, data: ChimeSDKMediaPipelines.Types.CreateMediaConcatenationPipelineResponse) => void): Request<ChimeSDKMediaPipelines.Types.CreateMediaConcatenationPipelineResponse, AWSError>;
  /**
   * Creates a media concatenation pipeline.
   */
  createMediaConcatenationPipeline(callback?: (err: AWSError, data: ChimeSDKMediaPipelines.Types.CreateMediaConcatenationPipelineResponse) => void): Request<ChimeSDKMediaPipelines.Types.CreateMediaConcatenationPipelineResponse, AWSError>;
  /**
   * Creates a media insights pipeline.
   */
  createMediaInsightsPipeline(params: ChimeSDKMediaPipelines.Types.CreateMediaInsightsPipelineRequest, callback?: (err: AWSError, data: ChimeSDKMediaPipelines.Types.CreateMediaInsightsPipelineResponse) => void): Request<ChimeSDKMediaPipelines.Types.CreateMediaInsightsPipelineResponse, AWSError>;
  /**
   * Creates a media insights pipeline.
   */
  createMediaInsightsPipeline(callback?: (err: AWSError, data: ChimeSDKMediaPipelines.Types.CreateMediaInsightsPipelineResponse) => void): Request<ChimeSDKMediaPipelines.Types.CreateMediaInsightsPipelineResponse, AWSError>;
  /**
   * Creates a media insights pipeline configuration, a structure that contains the static configurations for a media insights pipeline.
   */
  createMediaInsightsPipelineConfiguration(params: ChimeSDKMediaPipelines.Types.CreateMediaInsightsPipelineConfigurationRequest, callback?: (err: AWSError, data: ChimeSDKMediaPipelines.Types.CreateMediaInsightsPipelineConfigurationResponse) => void): Request<ChimeSDKMediaPipelines.Types.CreateMediaInsightsPipelineConfigurationResponse, AWSError>;
  /**
   * Creates a media insights pipeline configuration, a structure that contains the static configurations for a media insights pipeline.
   */
  createMediaInsightsPipelineConfiguration(callback?: (err: AWSError, data: ChimeSDKMediaPipelines.Types.CreateMediaInsightsPipelineConfigurationResponse) => void): Request<ChimeSDKMediaPipelines.Types.CreateMediaInsightsPipelineConfigurationResponse, AWSError>;
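  /*
   * Usage sketch (an editor's illustration, not part of the generated typings).
   * It assumes the aws-sdk v2 package is installed; the region, account ID,
   * meeting ID, and bucket name are hypothetical placeholders. The request
   * fields come from CreateMediaCapturePipelineRequest below.
   *
   *   import ChimeSDKMediaPipelines = require('aws-sdk/clients/chimesdkmediapipelines');
   *
   *   const pipelines = new ChimeSDKMediaPipelines({ region: 'us-east-1' });
   *   pipelines.createMediaCapturePipeline({
   *     SourceType: 'ChimeSdkMeeting',
   *     SourceArn: 'arn:aws:chime::111122223333:meeting/hypothetical-meeting-id',
   *     SinkType: 'S3Bucket',
   *     SinkArn: 'arn:aws:s3:::hypothetical-capture-bucket',
   *   }, (err, data) => {
   *     if (err) console.error(err);
   *     else console.log(data.MediaCapturePipeline?.MediaPipelineId);
   *   });
   */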
  /**
   * Creates a media live connector pipeline in an Amazon Chime SDK meeting.
   */
  createMediaLiveConnectorPipeline(params: ChimeSDKMediaPipelines.Types.CreateMediaLiveConnectorPipelineRequest, callback?: (err: AWSError, data: ChimeSDKMediaPipelines.Types.CreateMediaLiveConnectorPipelineResponse) => void): Request<ChimeSDKMediaPipelines.Types.CreateMediaLiveConnectorPipelineResponse, AWSError>;
  /**
   * Creates a media live connector pipeline in an Amazon Chime SDK meeting.
   */
  createMediaLiveConnectorPipeline(callback?: (err: AWSError, data: ChimeSDKMediaPipelines.Types.CreateMediaLiveConnectorPipelineResponse) => void): Request<ChimeSDKMediaPipelines.Types.CreateMediaLiveConnectorPipelineResponse, AWSError>;
  /**
   * Creates an Amazon Kinesis Video Stream pool for use with media stream pipelines. If a meeting uses an opt-in Region as its MediaRegion, the KVS stream must be in that same Region. For example, if a meeting uses the af-south-1 Region, the KVS stream must also be in af-south-1. However, if the meeting uses a Region that AWS turns on by default, the KVS stream can be in any available Region, including an opt-in Region. For example, if the meeting uses ca-central-1, the KVS stream can be in eu-west-2, us-east-1, af-south-1, or any other Region that the Amazon Chime SDK supports. To learn which AWS Region a meeting uses, call the GetMeeting API and use the MediaRegion parameter from the response. For more information about opt-in Regions, refer to Available Regions in the Amazon Chime SDK Developer Guide, and Specify which AWS Regions your account can use, in the AWS Account Management Reference Guide.
   */
  createMediaPipelineKinesisVideoStreamPool(params: ChimeSDKMediaPipelines.Types.CreateMediaPipelineKinesisVideoStreamPoolRequest, callback?: (err: AWSError, data: ChimeSDKMediaPipelines.Types.CreateMediaPipelineKinesisVideoStreamPoolResponse) => void): Request<ChimeSDKMediaPipelines.Types.CreateMediaPipelineKinesisVideoStreamPoolResponse, AWSError>;
  /**
   * Creates an Amazon Kinesis Video Stream pool for use with media stream pipelines. If a meeting uses an opt-in Region as its MediaRegion, the KVS stream must be in that same Region. For example, if a meeting uses the af-south-1 Region, the KVS stream must also be in af-south-1. However, if the meeting uses a Region that AWS turns on by default, the KVS stream can be in any available Region, including an opt-in Region. For example, if the meeting uses ca-central-1, the KVS stream can be in eu-west-2, us-east-1, af-south-1, or any other Region that the Amazon Chime SDK supports. To learn which AWS Region a meeting uses, call the GetMeeting API and use the MediaRegion parameter from the response. For more information about opt-in Regions, refer to Available Regions in the Amazon Chime SDK Developer Guide, and Specify which AWS Regions your account can use, in the AWS Account Management Reference Guide.
   */
  createMediaPipelineKinesisVideoStreamPool(callback?: (err: AWSError, data: ChimeSDKMediaPipelines.Types.CreateMediaPipelineKinesisVideoStreamPoolResponse) => void): Request<ChimeSDKMediaPipelines.Types.CreateMediaPipelineKinesisVideoStreamPoolResponse, AWSError>;
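  /*
   * Sketch of creating a stream pool, reusing the `pipelines` client from the
   * sketch above (illustrative; the StreamConfiguration fields are defined
   * further down in this file, and the pool name and retention value are
   * hypothetical choices). The Region must satisfy the MediaRegion rules in
   * the comment above.
   *
   *   pipelines.createMediaPipelineKinesisVideoStreamPool({
   *     PoolName: 'hypothetical-pool',
   *     StreamConfiguration: {
   *       Region: 'us-east-1',
   *       DataRetentionInHours: 24,
   *     },
   *   }).promise()
   *     .then(res => console.log(res.KinesisVideoStreamPoolConfiguration?.PoolArn));
   */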
  /**
   * Creates a streaming media pipeline.
   */
  createMediaStreamPipeline(params: ChimeSDKMediaPipelines.Types.CreateMediaStreamPipelineRequest, callback?: (err: AWSError, data: ChimeSDKMediaPipelines.Types.CreateMediaStreamPipelineResponse) => void): Request<ChimeSDKMediaPipelines.Types.CreateMediaStreamPipelineResponse, AWSError>;
  /**
   * Creates a streaming media pipeline.
   */
  createMediaStreamPipeline(callback?: (err: AWSError, data: ChimeSDKMediaPipelines.Types.CreateMediaStreamPipelineResponse) => void): Request<ChimeSDKMediaPipelines.Types.CreateMediaStreamPipelineResponse, AWSError>;
  /**
   * Deletes the media pipeline.
   */
  deleteMediaCapturePipeline(params: ChimeSDKMediaPipelines.Types.DeleteMediaCapturePipelineRequest, callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
  /**
   * Deletes the media pipeline.
   */
  deleteMediaCapturePipeline(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
  /**
   * Deletes the specified configuration settings.
   */
  deleteMediaInsightsPipelineConfiguration(params: ChimeSDKMediaPipelines.Types.DeleteMediaInsightsPipelineConfigurationRequest, callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
  /**
   * Deletes the specified configuration settings.
   */
  deleteMediaInsightsPipelineConfiguration(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
  /**
   * Deletes the media pipeline.
   */
  deleteMediaPipeline(params: ChimeSDKMediaPipelines.Types.DeleteMediaPipelineRequest, callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
  /**
   * Deletes the media pipeline.
   */
  deleteMediaPipeline(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
  /**
   * Deletes an Amazon Kinesis Video Stream pool.
   */
  deleteMediaPipelineKinesisVideoStreamPool(params: ChimeSDKMediaPipelines.Types.DeleteMediaPipelineKinesisVideoStreamPoolRequest, callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
  /**
   * Deletes an Amazon Kinesis Video Stream pool.
   */
  deleteMediaPipelineKinesisVideoStreamPool(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
  /**
   * Gets an existing media pipeline.
   */
  getMediaCapturePipeline(params: ChimeSDKMediaPipelines.Types.GetMediaCapturePipelineRequest, callback?: (err: AWSError, data: ChimeSDKMediaPipelines.Types.GetMediaCapturePipelineResponse) => void): Request<ChimeSDKMediaPipelines.Types.GetMediaCapturePipelineResponse, AWSError>;
  /**
   * Gets an existing media pipeline.
   */
  getMediaCapturePipeline(callback?: (err: AWSError, data: ChimeSDKMediaPipelines.Types.GetMediaCapturePipelineResponse) => void): Request<ChimeSDKMediaPipelines.Types.GetMediaCapturePipelineResponse, AWSError>;
  /**
   * Gets the configuration settings for a media insights pipeline.
   */
  getMediaInsightsPipelineConfiguration(params: ChimeSDKMediaPipelines.Types.GetMediaInsightsPipelineConfigurationRequest, callback?: (err: AWSError, data: ChimeSDKMediaPipelines.Types.GetMediaInsightsPipelineConfigurationResponse) => void): Request<ChimeSDKMediaPipelines.Types.GetMediaInsightsPipelineConfigurationResponse, AWSError>;
  /**
   * Gets the configuration settings for a media insights pipeline.
   */
  getMediaInsightsPipelineConfiguration(callback?: (err: AWSError, data: ChimeSDKMediaPipelines.Types.GetMediaInsightsPipelineConfigurationResponse) => void): Request<ChimeSDKMediaPipelines.Types.GetMediaInsightsPipelineConfigurationResponse, AWSError>;
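  /*
   * Sketch: every method returns an AWS.Request, so in aws-sdk v2 the callback
   * can be omitted in favor of .promise() (illustrative; run inside an async
   * function; the pipeline ID is hypothetical).
   *
   *   const { MediaCapturePipeline } = await pipelines
   *     .getMediaCapturePipeline({ MediaPipelineId: 'hypothetical-pipeline-id' })
   *     .promise();
   *   await pipelines
   *     .deleteMediaCapturePipeline({ MediaPipelineId: 'hypothetical-pipeline-id' })
   *     .promise();
   */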
  /**
   * Gets an existing media pipeline.
   */
  getMediaPipeline(params: ChimeSDKMediaPipelines.Types.GetMediaPipelineRequest, callback?: (err: AWSError, data: ChimeSDKMediaPipelines.Types.GetMediaPipelineResponse) => void): Request<ChimeSDKMediaPipelines.Types.GetMediaPipelineResponse, AWSError>;
  /**
   * Gets an existing media pipeline.
   */
  getMediaPipeline(callback?: (err: AWSError, data: ChimeSDKMediaPipelines.Types.GetMediaPipelineResponse) => void): Request<ChimeSDKMediaPipelines.Types.GetMediaPipelineResponse, AWSError>;
  /**
   * Gets a Kinesis video stream pool.
   */
  getMediaPipelineKinesisVideoStreamPool(params: ChimeSDKMediaPipelines.Types.GetMediaPipelineKinesisVideoStreamPoolRequest, callback?: (err: AWSError, data: ChimeSDKMediaPipelines.Types.GetMediaPipelineKinesisVideoStreamPoolResponse) => void): Request<ChimeSDKMediaPipelines.Types.GetMediaPipelineKinesisVideoStreamPoolResponse, AWSError>;
  /**
   * Gets a Kinesis video stream pool.
   */
  getMediaPipelineKinesisVideoStreamPool(callback?: (err: AWSError, data: ChimeSDKMediaPipelines.Types.GetMediaPipelineKinesisVideoStreamPoolResponse) => void): Request<ChimeSDKMediaPipelines.Types.GetMediaPipelineKinesisVideoStreamPoolResponse, AWSError>;
  /**
   * Retrieves the details of the specified speaker search task.
   */
  getSpeakerSearchTask(params: ChimeSDKMediaPipelines.Types.GetSpeakerSearchTaskRequest, callback?: (err: AWSError, data: ChimeSDKMediaPipelines.Types.GetSpeakerSearchTaskResponse) => void): Request<ChimeSDKMediaPipelines.Types.GetSpeakerSearchTaskResponse, AWSError>;
  /**
   * Retrieves the details of the specified speaker search task.
   */
  getSpeakerSearchTask(callback?: (err: AWSError, data: ChimeSDKMediaPipelines.Types.GetSpeakerSearchTaskResponse) => void): Request<ChimeSDKMediaPipelines.Types.GetSpeakerSearchTaskResponse, AWSError>;
  /**
   * Retrieves the details of a voice tone analysis task.
   */
  getVoiceToneAnalysisTask(params: ChimeSDKMediaPipelines.Types.GetVoiceToneAnalysisTaskRequest, callback?: (err: AWSError, data: ChimeSDKMediaPipelines.Types.GetVoiceToneAnalysisTaskResponse) => void): Request<ChimeSDKMediaPipelines.Types.GetVoiceToneAnalysisTaskResponse, AWSError>;
  /**
   * Retrieves the details of a voice tone analysis task.
   */
  getVoiceToneAnalysisTask(callback?: (err: AWSError, data: ChimeSDKMediaPipelines.Types.GetVoiceToneAnalysisTaskResponse) => void): Request<ChimeSDKMediaPipelines.Types.GetVoiceToneAnalysisTaskResponse, AWSError>;
  /**
   * Returns a list of media pipelines.
   */
  listMediaCapturePipelines(params: ChimeSDKMediaPipelines.Types.ListMediaCapturePipelinesRequest, callback?: (err: AWSError, data: ChimeSDKMediaPipelines.Types.ListMediaCapturePipelinesResponse) => void): Request<ChimeSDKMediaPipelines.Types.ListMediaCapturePipelinesResponse, AWSError>;
  /**
   * Returns a list of media pipelines.
   */
  listMediaCapturePipelines(callback?: (err: AWSError, data: ChimeSDKMediaPipelines.Types.ListMediaCapturePipelinesResponse) => void): Request<ChimeSDKMediaPipelines.Types.ListMediaCapturePipelinesResponse, AWSError>;
  /**
   * Lists the available media insights pipeline configurations.
   */
  listMediaInsightsPipelineConfigurations(params: ChimeSDKMediaPipelines.Types.ListMediaInsightsPipelineConfigurationsRequest, callback?: (err: AWSError, data: ChimeSDKMediaPipelines.Types.ListMediaInsightsPipelineConfigurationsResponse) => void): Request<ChimeSDKMediaPipelines.Types.ListMediaInsightsPipelineConfigurationsResponse, AWSError>;
  /**
   * Lists the available media insights pipeline configurations.
   */
  listMediaInsightsPipelineConfigurations(callback?: (err: AWSError, data: ChimeSDKMediaPipelines.Types.ListMediaInsightsPipelineConfigurationsResponse) => void): Request<ChimeSDKMediaPipelines.Types.ListMediaInsightsPipelineConfigurationsResponse, AWSError>;
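  /*
   * Pagination sketch (illustrative): the List* calls return a NextToken when
   * more results exist; the MaxResults and NextToken request fields are defined
   * later in this file.
   *
   *   let nextToken: string | undefined;
   *   do {
   *     const page = await pipelines
   *       .listMediaCapturePipelines({ MaxResults: 10, NextToken: nextToken })
   *       .promise();
   *     page.MediaCapturePipelines?.forEach(p => console.log(p.MediaPipelineId));
   *     nextToken = page.NextToken;
   *   } while (nextToken);
   */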
  /**
   * Lists the video stream pools in the media pipeline.
   */
  listMediaPipelineKinesisVideoStreamPools(params: ChimeSDKMediaPipelines.Types.ListMediaPipelineKinesisVideoStreamPoolsRequest, callback?: (err: AWSError, data: ChimeSDKMediaPipelines.Types.ListMediaPipelineKinesisVideoStreamPoolsResponse) => void): Request<ChimeSDKMediaPipelines.Types.ListMediaPipelineKinesisVideoStreamPoolsResponse, AWSError>;
  /**
   * Lists the video stream pools in the media pipeline.
   */
  listMediaPipelineKinesisVideoStreamPools(callback?: (err: AWSError, data: ChimeSDKMediaPipelines.Types.ListMediaPipelineKinesisVideoStreamPoolsResponse) => void): Request<ChimeSDKMediaPipelines.Types.ListMediaPipelineKinesisVideoStreamPoolsResponse, AWSError>;
  /**
   * Returns a list of media pipelines.
   */
  listMediaPipelines(params: ChimeSDKMediaPipelines.Types.ListMediaPipelinesRequest, callback?: (err: AWSError, data: ChimeSDKMediaPipelines.Types.ListMediaPipelinesResponse) => void): Request<ChimeSDKMediaPipelines.Types.ListMediaPipelinesResponse, AWSError>;
  /**
   * Returns a list of media pipelines.
   */
  listMediaPipelines(callback?: (err: AWSError, data: ChimeSDKMediaPipelines.Types.ListMediaPipelinesResponse) => void): Request<ChimeSDKMediaPipelines.Types.ListMediaPipelinesResponse, AWSError>;
  /**
   * Lists the tags available for a media pipeline.
   */
  listTagsForResource(params: ChimeSDKMediaPipelines.Types.ListTagsForResourceRequest, callback?: (err: AWSError, data: ChimeSDKMediaPipelines.Types.ListTagsForResourceResponse) => void): Request<ChimeSDKMediaPipelines.Types.ListTagsForResourceResponse, AWSError>;
  /**
   * Lists the tags available for a media pipeline.
   */
  listTagsForResource(callback?: (err: AWSError, data: ChimeSDKMediaPipelines.Types.ListTagsForResourceResponse) => void): Request<ChimeSDKMediaPipelines.Types.ListTagsForResourceResponse, AWSError>;
  /**
   * Starts a speaker search task. Before starting any speaker search tasks, you must provide all notices and obtain all consents from the speaker as required under applicable privacy and biometrics laws, and as required under the AWS service terms for the Amazon Chime SDK.
   */
  startSpeakerSearchTask(params: ChimeSDKMediaPipelines.Types.StartSpeakerSearchTaskRequest, callback?: (err: AWSError, data: ChimeSDKMediaPipelines.Types.StartSpeakerSearchTaskResponse) => void): Request<ChimeSDKMediaPipelines.Types.StartSpeakerSearchTaskResponse, AWSError>;
  /**
   * Starts a speaker search task. Before starting any speaker search tasks, you must provide all notices and obtain all consents from the speaker as required under applicable privacy and biometrics laws, and as required under the AWS service terms for the Amazon Chime SDK.
   */
  startSpeakerSearchTask(callback?: (err: AWSError, data: ChimeSDKMediaPipelines.Types.StartSpeakerSearchTaskResponse) => void): Request<ChimeSDKMediaPipelines.Types.StartSpeakerSearchTaskResponse, AWSError>;
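  /*
   * Sketch of starting a speaker search task against a media insights pipeline
   * (illustrative; the request shape is defined later in this file, and the
   * identifier and voice profile domain ARN are hypothetical). Note the consent
   * obligations described in the doc comment above.
   *
   *   const task = await pipelines.startSpeakerSearchTask({
   *     Identifier: 'hypothetical-media-insights-pipeline-id',
   *     VoiceProfileDomainArn: 'arn:aws:chime:us-east-1:111122223333:voice-profile-domain/hypothetical',
   *   }).promise();
   *   console.log(task.SpeakerSearchTask?.SpeakerSearchTaskId);
   */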
  /**
   * Starts a voice tone analysis task. For more information about voice tone analysis, see Using Amazon Chime SDK voice analytics in the Amazon Chime SDK Developer Guide. Before starting any voice tone analysis tasks, you must provide all notices and obtain all consents from the speaker as required under applicable privacy and biometrics laws, and as required under the AWS service terms for the Amazon Chime SDK.
   */
  startVoiceToneAnalysisTask(params: ChimeSDKMediaPipelines.Types.StartVoiceToneAnalysisTaskRequest, callback?: (err: AWSError, data: ChimeSDKMediaPipelines.Types.StartVoiceToneAnalysisTaskResponse) => void): Request<ChimeSDKMediaPipelines.Types.StartVoiceToneAnalysisTaskResponse, AWSError>;
  /**
   * Starts a voice tone analysis task. For more information about voice tone analysis, see Using Amazon Chime SDK voice analytics in the Amazon Chime SDK Developer Guide. Before starting any voice tone analysis tasks, you must provide all notices and obtain all consents from the speaker as required under applicable privacy and biometrics laws, and as required under the AWS service terms for the Amazon Chime SDK.
   */
  startVoiceToneAnalysisTask(callback?: (err: AWSError, data: ChimeSDKMediaPipelines.Types.StartVoiceToneAnalysisTaskResponse) => void): Request<ChimeSDKMediaPipelines.Types.StartVoiceToneAnalysisTaskResponse, AWSError>;
  /**
   * Stops a speaker search task.
   */
  stopSpeakerSearchTask(params: ChimeSDKMediaPipelines.Types.StopSpeakerSearchTaskRequest, callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
  /**
   * Stops a speaker search task.
   */
  stopSpeakerSearchTask(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
  /**
   * Stops a voice tone analysis task.
   */
  stopVoiceToneAnalysisTask(params: ChimeSDKMediaPipelines.Types.StopVoiceToneAnalysisTaskRequest, callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
  /**
   * Stops a voice tone analysis task.
   */
  stopVoiceToneAnalysisTask(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
  /**
   * Tags the specified media pipeline. The pipeline's ARN consists of the pipeline's endpoint region, resource ID, and pipeline ID.
   */
  tagResource(params: ChimeSDKMediaPipelines.Types.TagResourceRequest, callback?: (err: AWSError, data: ChimeSDKMediaPipelines.Types.TagResourceResponse) => void): Request<ChimeSDKMediaPipelines.Types.TagResourceResponse, AWSError>;
  /**
   * Tags the specified media pipeline. The pipeline's ARN consists of the pipeline's endpoint region, resource ID, and pipeline ID.
   */
  tagResource(callback?: (err: AWSError, data: ChimeSDKMediaPipelines.Types.TagResourceResponse) => void): Request<ChimeSDKMediaPipelines.Types.TagResourceResponse, AWSError>;
  /**
   * Removes any tags from a media pipeline.
   */
  untagResource(params: ChimeSDKMediaPipelines.Types.UntagResourceRequest, callback?: (err: AWSError, data: ChimeSDKMediaPipelines.Types.UntagResourceResponse) => void): Request<ChimeSDKMediaPipelines.Types.UntagResourceResponse, AWSError>;
  /**
   * Removes any tags from a media pipeline.
   */
  untagResource(callback?: (err: AWSError, data: ChimeSDKMediaPipelines.Types.UntagResourceResponse) => void): Request<ChimeSDKMediaPipelines.Types.UntagResourceResponse, AWSError>;
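  /*
   * Tagging sketch (illustrative; TagResourceRequest and UntagResourceRequest
   * are defined later in this file, and the ARN is hypothetical).
   *
   *   await pipelines.tagResource({
   *     ResourceARN: 'arn:aws:chime:us-east-1:111122223333:media-pipeline/hypothetical-id',
   *     Tags: [{ Key: 'project', Value: 'demo' }],
   *   }).promise();
   *   await pipelines.untagResource({
   *     ResourceARN: 'arn:aws:chime:us-east-1:111122223333:media-pipeline/hypothetical-id',
   *     TagKeys: ['project'],
   *   }).promise();
   */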
  /**
   * Updates the media insights pipeline's configuration settings.
   */
  updateMediaInsightsPipelineConfiguration(params: ChimeSDKMediaPipelines.Types.UpdateMediaInsightsPipelineConfigurationRequest, callback?: (err: AWSError, data: ChimeSDKMediaPipelines.Types.UpdateMediaInsightsPipelineConfigurationResponse) => void): Request<ChimeSDKMediaPipelines.Types.UpdateMediaInsightsPipelineConfigurationResponse, AWSError>;
  /**
   * Updates the media insights pipeline's configuration settings.
   */
  updateMediaInsightsPipelineConfiguration(callback?: (err: AWSError, data: ChimeSDKMediaPipelines.Types.UpdateMediaInsightsPipelineConfigurationResponse) => void): Request<ChimeSDKMediaPipelines.Types.UpdateMediaInsightsPipelineConfigurationResponse, AWSError>;
  /**
   * Updates the status of a media insights pipeline.
   */
  updateMediaInsightsPipelineStatus(params: ChimeSDKMediaPipelines.Types.UpdateMediaInsightsPipelineStatusRequest, callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
  /**
   * Updates the status of a media insights pipeline.
   */
  updateMediaInsightsPipelineStatus(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
  /**
   * Updates an Amazon Kinesis Video Stream pool in a media pipeline.
   */
  updateMediaPipelineKinesisVideoStreamPool(params: ChimeSDKMediaPipelines.Types.UpdateMediaPipelineKinesisVideoStreamPoolRequest, callback?: (err: AWSError, data: ChimeSDKMediaPipelines.Types.UpdateMediaPipelineKinesisVideoStreamPoolResponse) => void): Request<ChimeSDKMediaPipelines.Types.UpdateMediaPipelineKinesisVideoStreamPoolResponse, AWSError>;
  /**
   * Updates an Amazon Kinesis Video Stream pool in a media pipeline.
   */
  updateMediaPipelineKinesisVideoStreamPool(callback?: (err: AWSError, data: ChimeSDKMediaPipelines.Types.UpdateMediaPipelineKinesisVideoStreamPoolResponse) => void): Request<ChimeSDKMediaPipelines.Types.UpdateMediaPipelineKinesisVideoStreamPoolResponse, AWSError>;
}
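/*
 * Sketch: pausing (or resuming) a media insights pipeline via
 * updateMediaInsightsPipelineStatus (illustrative; the UpdateStatus literals
 * come from the MediaPipelineStatusUpdate type later in the file, and the
 * identifier is hypothetical).
 *
 *   await pipelines.updateMediaInsightsPipelineStatus({
 *     Identifier: 'hypothetical-media-insights-pipeline-id',
 *     UpdateStatus: 'Pause',
 *   }).promise();
 */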
declare namespace ChimeSDKMediaPipelines {
  export interface ActiveSpeakerOnlyConfiguration {
    /**
     * The position of the ActiveSpeakerOnly video tile.
     */
    ActiveSpeakerPosition?: ActiveSpeakerPosition;
  }
  export type ActiveSpeakerPosition = "TopLeft"|"TopRight"|"BottomLeft"|"BottomRight"|string;
  export type AmazonResourceName = string;
  export interface AmazonTranscribeCallAnalyticsProcessorConfiguration {
    /**
     * The language code in the configuration.
     */
    LanguageCode: CallAnalyticsLanguageCode;
    /**
     * Specifies the name of the custom vocabulary to use when processing a transcription. Note that vocabulary names are case sensitive. If the language of the specified custom vocabulary doesn't match the language identified in your media, the custom vocabulary is not applied to your transcription. For more information, see Custom vocabularies in the Amazon Transcribe Developer Guide. Length Constraints: Minimum length of 1. Maximum length of 200.
     */
    VocabularyName?: VocabularyName;
    /**
     * Specifies the name of the custom vocabulary filter to use when processing a transcription. Note that vocabulary filter names are case sensitive. If the language of the specified custom vocabulary filter doesn't match the language identified in your media, the vocabulary filter is not applied to your transcription. For more information, see Using vocabulary filtering with unwanted words in the Amazon Transcribe Developer Guide. Length Constraints: Minimum length of 1. Maximum length of 200.
     */
    VocabularyFilterName?: VocabularyFilterName;
    /**
     * Specifies how to apply a vocabulary filter to a transcript. To replace words with ***, choose mask. To delete words, choose remove. To flag words without changing them, choose tag.
     */
    VocabularyFilterMethod?: VocabularyFilterMethod;
    /**
     * Specifies the name of the custom language model to use when processing a transcription. Note that language model names are case sensitive. The language of the specified language model must match the language code specified in the transcription request. If the languages don't match, the custom language model isn't applied. Language mismatches don't generate errors or warnings. For more information, see Custom language models in the Amazon Transcribe Developer Guide.
     */
    LanguageModelName?: ModelName;
    /**
     * Enables partial result stabilization for your transcription. Partial result stabilization can reduce latency in your output, but may impact accuracy. For more information, see Partial-result stabilization in the Amazon Transcribe Developer Guide.
     */
    EnablePartialResultsStabilization?: Boolean;
    /**
     * Specifies the level of stability to use when you enable partial results stabilization (EnablePartialResultsStabilization). Low stability provides the highest accuracy. High stability transcribes faster, but with slightly lower accuracy. For more information, see Partial-result stabilization in the Amazon Transcribe Developer Guide.
     */
    PartialResultsStability?: PartialResultsStability;
    /**
     * Labels all personally identifiable information (PII) identified in your transcript. Content identification is performed at the segment level; PII specified in PiiEntityTypes is flagged upon complete transcription of an audio segment. You can’t set ContentIdentificationType and ContentRedactionType in the same request. If you do, your request returns a BadRequestException. For more information, see Redacting or identifying personally identifiable information in the Amazon Transcribe Developer Guide.
     */
    ContentIdentificationType?: ContentType;
    /**
     * Redacts all personally identifiable information (PII) identified in your transcript. Content redaction is performed at the segment level; PII specified in PiiEntityTypes is redacted upon complete transcription of an audio segment. You can’t set ContentRedactionType and ContentIdentificationType in the same request. If you do, your request returns a BadRequestException. For more information, see Redacting or identifying personally identifiable information in the Amazon Transcribe Developer Guide.
     */
    ContentRedactionType?: ContentType;
    /**
     * Specifies the types of personally identifiable information (PII) to redact from a transcript. You can include as many types as you'd like, or you can select ALL. To include PiiEntityTypes in your Call Analytics request, you must also include ContentIdentificationType or ContentRedactionType, but you can't include both. Values must be comma-separated and can include: ADDRESS, BANK_ACCOUNT_NUMBER, BANK_ROUTING, CREDIT_DEBIT_CVV, CREDIT_DEBIT_EXPIRY, CREDIT_DEBIT_NUMBER, EMAIL, NAME, PHONE, PIN, SSN, or ALL. Length Constraints: Minimum length of 1. Maximum length of 300.
     */
    PiiEntityTypes?: PiiEntityTypes;
    /**
     * If true, UtteranceEvents with IsPartial: true are filtered out of the insights target.
     */
    FilterPartialResults?: Boolean;
    /**
     * The settings for a post-call analysis task in an analytics configuration.
     */
    PostCallAnalyticsSettings?: PostCallAnalyticsSettings;
    /**
     * By default, all CategoryEvents are sent to the insights target. If this parameter is specified, only included categories are sent to the insights target.
     */
    CallAnalyticsStreamCategories?: CategoryNameList;
  }
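  /*
   * Sketch of an AmazonTranscribeCallAnalyticsProcessorConfiguration value that
   * redacts PII (illustrative; remember that ContentRedactionType and
   * ContentIdentificationType are mutually exclusive, as noted above).
   *
   *   const callAnalytics: ChimeSDKMediaPipelines.AmazonTranscribeCallAnalyticsProcessorConfiguration = {
   *     LanguageCode: 'en-US',
   *     ContentRedactionType: 'PII',
   *     PiiEntityTypes: 'SSN,CREDIT_DEBIT_NUMBER',
   *     FilterPartialResults: true,
   *   };
   */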
  export interface AmazonTranscribeProcessorConfiguration {
    /**
     * The language code that represents the language spoken in your audio. If you're unsure of the language spoken in your audio, consider using IdentifyLanguage to enable automatic language identification. For a list of languages that real-time Call Analytics supports, see the Supported languages table in the Amazon Transcribe Developer Guide.
     */
    LanguageCode?: CallAnalyticsLanguageCode;
    /**
     * The name of the custom vocabulary that you specified in your Call Analytics request. Length Constraints: Minimum length of 1. Maximum length of 200.
     */
    VocabularyName?: VocabularyName;
    /**
     * The name of the custom vocabulary filter that you specified in your Call Analytics request. Length Constraints: Minimum length of 1. Maximum length of 200.
     */
    VocabularyFilterName?: VocabularyFilterName;
    /**
     * The vocabulary filtering method used in your Call Analytics transcription.
     */
    VocabularyFilterMethod?: VocabularyFilterMethod;
    /**
     * Enables speaker partitioning (diarization) in your transcription output. Speaker partitioning labels the speech from individual speakers in your media file. For more information, see Partitioning speakers (diarization) in the Amazon Transcribe Developer Guide.
     */
    ShowSpeakerLabel?: Boolean;
    /**
     * Enables partial result stabilization for your transcription. Partial result stabilization can reduce latency in your output, but may impact accuracy. For more information, see Partial-result stabilization in the Amazon Transcribe Developer Guide.
     */
    EnablePartialResultsStabilization?: Boolean;
    /**
     * The level of stability to use when you enable partial results stabilization (EnablePartialResultsStabilization). Low stability provides the highest accuracy. High stability transcribes faster, but with slightly lower accuracy. For more information, see Partial-result stabilization in the Amazon Transcribe Developer Guide.
     */
    PartialResultsStability?: PartialResultsStability;
    /**
     * Labels all personally identifiable information (PII) identified in your transcript. Content identification is performed at the segment level; PII specified in PiiEntityTypes is flagged upon complete transcription of an audio segment. You can’t set ContentIdentificationType and ContentRedactionType in the same request. If you set both, your request returns a BadRequestException. For more information, see Redacting or identifying personally identifiable information in the Amazon Transcribe Developer Guide.
     */
    ContentIdentificationType?: ContentType;
    /**
     * Redacts all personally identifiable information (PII) identified in your transcript. Content redaction is performed at the segment level; PII specified in PiiEntityTypes is redacted upon complete transcription of an audio segment. You can’t set ContentRedactionType and ContentIdentificationType in the same request. If you set both, your request returns a BadRequestException. For more information, see Redacting or identifying personally identifiable information in the Amazon Transcribe Developer Guide.
     */
    ContentRedactionType?: ContentType;
    /**
     * The types of personally identifiable information (PII) to redact from a transcript. You can include as many types as you'd like, or you can select ALL. To include PiiEntityTypes in your Call Analytics request, you must also include ContentIdentificationType or ContentRedactionType, but you can't include both. Values must be comma-separated and can include: ADDRESS, BANK_ACCOUNT_NUMBER, BANK_ROUTING, CREDIT_DEBIT_CVV, CREDIT_DEBIT_EXPIRY, CREDIT_DEBIT_NUMBER, EMAIL, NAME, PHONE, PIN, SSN, or ALL. If you leave this parameter empty, the default behavior is equivalent to ALL.
     */
    PiiEntityTypes?: PiiEntityTypes;
    /**
     * The name of the custom language model that you want to use when processing your transcription. Note that language model names are case sensitive. The language of the specified language model must match the language code you specify in your transcription request. If the languages don't match, the custom language model isn't applied. There are no errors or warnings associated with a language mismatch. For more information, see Custom language models in the Amazon Transcribe Developer Guide.
     */
    LanguageModelName?: ModelName;
    /**
     * If true, TranscriptEvents with IsPartial: true are filtered out of the insights target.
     */
    FilterPartialResults?: Boolean;
    /**
     * Turns language identification on or off.
     */
    IdentifyLanguage?: Boolean;
    /**
     * Turns language identification on or off for multiple languages.
     */
    IdentifyMultipleLanguages?: Boolean;
    /**
     * The language options for the transcription, such as automatic language detection.
     */
    LanguageOptions?: LanguageOptions;
    /**
     * The preferred language for the transcription.
     */
    PreferredLanguage?: CallAnalyticsLanguageCode;
    /**
     * The names of the custom vocabulary or vocabularies used during transcription.
     */
    VocabularyNames?: VocabularyNames;
    /**
     * The names of the custom vocabulary filter or filters used during transcription.
     */
    VocabularyFilterNames?: VocabularyFilterNames;
  }
  export type Arn = string;
  export interface ArtifactsConcatenationConfiguration {
    /**
     * The configuration for the audio artifacts concatenation.
     */
    Audio: AudioConcatenationConfiguration;
    /**
     * The configuration for the video artifacts concatenation.
     */
    Video: VideoConcatenationConfiguration;
    /**
     * The configuration for the content artifacts concatenation.
     */
    Content: ContentConcatenationConfiguration;
    /**
     * The configuration for the data channel artifacts concatenation.
     */
    DataChannel: DataChannelConcatenationConfiguration;
    /**
     * The configuration for the transcription messages artifacts concatenation.
     */
    TranscriptionMessages: TranscriptionMessagesConcatenationConfiguration;
    /**
     * The configuration for the meeting events artifacts concatenation.
     */
    MeetingEvents: MeetingEventsConcatenationConfiguration;
    /**
     * The configuration for the composited video artifacts concatenation.
     */
    CompositedVideo: CompositedVideoConcatenationConfiguration;
  }
  export type ArtifactsConcatenationState = "Enabled"|"Disabled"|string;
  export interface ArtifactsConfiguration {
    /**
     * The configuration for the audio artifacts.
     */
    Audio: AudioArtifactsConfiguration;
    /**
     * The configuration for the video artifacts.
     */
    Video: VideoArtifactsConfiguration;
    /**
     * The configuration for the content artifacts.
     */
    Content: ContentArtifactsConfiguration;
    /**
     * Enables video compositing.
     */
    CompositedVideo?: CompositedVideoArtifactsConfiguration;
  }
  export type ArtifactsState = "Enabled"|"Disabled"|string;
  export type AttendeeIdList = GuidString[];
  export type AudioArtifactsConcatenationState = "Enabled"|string;
  export interface AudioArtifactsConfiguration {
    /**
     * The MUX type of the audio artifact configuration object.
     */
    MuxType: AudioMuxType;
  }
  export type AudioChannelsOption = "Stereo"|"Mono"|string;
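  /*
   * Sketch of an ArtifactsConfiguration for a capture pipeline (illustrative;
   * the State and MuxType literals come from the types in this file, and the
   * particular combination shown is an arbitrary example).
   *
   *   const artifacts: ChimeSDKMediaPipelines.ArtifactsConfiguration = {
   *     Audio: { MuxType: 'AudioWithActiveSpeakerVideo' },
   *     Video: { State: 'Disabled' },
   *     Content: { State: 'Disabled' },
   *   };
   */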
  export interface AudioConcatenationConfiguration {
    /**
     * Enables or disables the configuration object.
     */
    State: AudioArtifactsConcatenationState;
  }
  export type AudioMuxType = "AudioOnly"|"AudioWithActiveSpeakerVideo"|"AudioWithCompositedVideo"|string;
  export type AudioSampleRateOption = string;
  export type AwsRegion = string;
  export type Boolean = boolean;
  export type BorderColor = "Black"|"Blue"|"Red"|"Green"|"White"|"Yellow"|string;
  export type BorderThickness = number;
  export type CallAnalyticsLanguageCode = "en-US"|"en-GB"|"es-US"|"fr-CA"|"fr-FR"|"en-AU"|"it-IT"|"de-DE"|"pt-BR"|string;
  export type CanvasOrientation = "Landscape"|"Portrait"|string;
  export type CategoryName = string;
  export type CategoryNameList = CategoryName[];
  export interface ChannelDefinition {
    /**
     * The channel ID.
     */
    ChannelId: ChannelId;
    /**
     * Specifies whether the audio in a channel belongs to the AGENT or CUSTOMER.
     */
    ParticipantRole?: ParticipantRole;
  }
  export type ChannelDefinitions = ChannelDefinition[];
  export type ChannelId = number;
  export interface ChimeSdkMeetingConcatenationConfiguration {
    /**
     * The configuration for the artifacts in an Amazon Chime SDK meeting concatenation.
     */
    ArtifactsConfiguration: ArtifactsConcatenationConfiguration;
  }
  export interface ChimeSdkMeetingConfiguration {
    /**
     * The source configuration for a specified media pipeline.
     */
    SourceConfiguration?: SourceConfiguration;
    /**
     * The configuration for the artifacts in an Amazon Chime SDK meeting.
     */
    ArtifactsConfiguration?: ArtifactsConfiguration;
  }
  export interface ChimeSdkMeetingLiveConnectorConfiguration {
    /**
     * The configuration object's Chime SDK meeting ARN.
     */
    Arn: Arn;
    /**
     * The configuration object's multiplex type.
     */
    MuxType: LiveConnectorMuxType;
    /**
     * The media pipeline's composited video.
     */
    CompositedVideo?: CompositedVideoArtifactsConfiguration;
    /**
     * The source configuration settings of the media pipeline's configuration object.
     */
    SourceConfiguration?: SourceConfiguration;
  }
  export type ClientRequestToken = string;
  export interface CompositedVideoArtifactsConfiguration {
    /**
     * The layout setting, such as GridView in the configuration object.
     */
    Layout?: LayoutOption;
    /**
     * The video resolution setting in the configuration object. Default: HD at 1280 x 720. FHD resolution: 1920 x 1080.
     */
    Resolution?: ResolutionOption;
    /**
     * The GridView configuration setting.
     */
    GridViewConfiguration: GridViewConfiguration;
  }
  export interface CompositedVideoConcatenationConfiguration {
    /**
     * Enables or disables the configuration object.
     */
    State: ArtifactsConcatenationState;
  }
  export interface ConcatenationSink {
    /**
     * The type of data sink in the configuration object.
     */
    Type: ConcatenationSinkType;
    /**
     * The configuration settings for an Amazon S3 bucket sink.
     */
    S3BucketSinkConfiguration: S3BucketSinkConfiguration;
  }
  export type ConcatenationSinkList = ConcatenationSink[];
  export type ConcatenationSinkType = "S3Bucket"|string;
  export interface ConcatenationSource {
    /**
     * The type of concatenation source in a configuration object.
     */
    Type: ConcatenationSourceType;
    /**
     * The concatenation settings for the media pipeline in a configuration object.
     */
    MediaCapturePipelineSourceConfiguration: MediaCapturePipelineSourceConfiguration;
  }
  export type ConcatenationSourceList = ConcatenationSource[];
  export type ConcatenationSourceType = "MediaCapturePipeline"|string;
  export interface ContentArtifactsConfiguration {
    /**
     * Indicates whether the content artifact is enabled or disabled.
     */
    State: ArtifactsState;
    /**
     * The MUX type of the artifact configuration.
     */
    MuxType?: ContentMuxType;
  }
  export interface ContentConcatenationConfiguration {
    /**
     * Enables or disables the configuration object.
     */
    State: ArtifactsConcatenationState;
  }
  export type ContentMuxType = "ContentOnly"|string;
  export type ContentRedactionOutput = "redacted"|"redacted_and_unredacted"|string;
  export type ContentShareLayoutOption = "PresenterOnly"|"Horizontal"|"Vertical"|"ActiveSpeakerOnly"|string;
  export type ContentType = "PII"|string;
  export type CornerRadius = number;
  export interface CreateMediaCapturePipelineRequest {
    /**
     * Source type from which the media artifacts are captured. A Chime SDK Meeting is the only supported source.
     */
    SourceType: MediaPipelineSourceType;
    /**
     * ARN of the source from which the media artifacts are captured.
     */
    SourceArn: Arn;
    /**
     * Destination type to which the media artifacts are saved. You must use an S3 bucket.
     */
    SinkType: MediaPipelineSinkType;
    /**
     * The ARN of the sink type.
     */
    SinkArn: Arn;
    /**
     * The unique identifier for the client request. The token makes the API request idempotent. Use a unique token for each media pipeline request.
     */
    ClientRequestToken?: ClientRequestToken;
    /**
     * The configuration for a specified media pipeline. SourceType must be ChimeSdkMeeting.
     */
    ChimeSdkMeetingConfiguration?: ChimeSdkMeetingConfiguration;
    /**
     * The tag key-value pairs.
     */
    Tags?: TagList;
  }
  export interface CreateMediaCapturePipelineResponse {
    /**
     * A media pipeline object, the ID, source type, source ARN, sink type, and sink ARN of a media pipeline object.
     */
    MediaCapturePipeline?: MediaCapturePipeline;
  }
  export interface CreateMediaConcatenationPipelineRequest {
    /**
     * An object that specifies the sources for the media concatenation pipeline.
     */
    Sources: ConcatenationSourceList;
    /**
     * An object that specifies the data sinks for the media concatenation pipeline.
     */
    Sinks: ConcatenationSinkList;
    /**
     * The unique identifier for the client request. The token makes the API request idempotent. Use a unique token for each media concatenation pipeline request.
     */
    ClientRequestToken?: ClientRequestToken;
    /**
     * The tags associated with the media concatenation pipeline.
     */
    Tags?: TagList;
  }
  export interface CreateMediaConcatenationPipelineResponse {
    /**
     * A media concatenation pipeline object, the ID, source type, MediaPipelineARN, and sink of a media concatenation pipeline object.
     */
    MediaConcatenationPipeline?: MediaConcatenationPipeline;
  }
  export interface CreateMediaInsightsPipelineConfigurationRequest {
    /**
     * The name of the media insights pipeline configuration.
     */
    MediaInsightsPipelineConfigurationName: MediaInsightsPipelineConfigurationNameString;
    /**
     * The ARN of the role used by the service to access Amazon Web Services resources, including Transcribe and Transcribe Call Analytics, on the caller’s behalf.
     */
    ResourceAccessRoleArn: Arn;
    /**
     * The configuration settings for the real-time alerts in a media insights pipeline configuration.
     */
    RealTimeAlertConfiguration?: RealTimeAlertConfiguration;
    /**
     * The elements in the request, such as a processor for Amazon Transcribe or a sink for a Kinesis Data Stream.
     */
    Elements: MediaInsightsPipelineConfigurationElements;
    /**
     * The tags assigned to the media insights pipeline configuration.
     */
    Tags?: TagList;
    /**
     * The unique identifier for the media insights pipeline configuration request.
     */
    ClientRequestToken?: ClientRequestToken;
  }
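  /*
   * Sketch of creating a concatenation pipeline from a finished capture
   * pipeline (illustrative; MediaCapturePipelineSourceConfiguration and the S3
   * sink shape are defined later in this file, and both ARNs are hypothetical).
   *
   *   await pipelines.createMediaConcatenationPipeline({
   *     Sources: [{
   *       Type: 'MediaCapturePipeline',
   *       MediaCapturePipelineSourceConfiguration: {
   *         MediaPipelineArn: 'arn:aws:chime:us-east-1:111122223333:media-pipeline/hypothetical-id',
   *         ChimeSdkMeetingConfiguration: {
   *           ArtifactsConfiguration: {
   *             Audio: { State: 'Enabled' },
   *             Video: { State: 'Enabled' },
   *             Content: { State: 'Enabled' },
   *             DataChannel: { State: 'Enabled' },
   *             TranscriptionMessages: { State: 'Enabled' },
   *             MeetingEvents: { State: 'Enabled' },
   *             CompositedVideo: { State: 'Enabled' },
   *           },
   *         },
   *       },
   *     }],
   *     Sinks: [{
   *       Type: 'S3Bucket',
   *       S3BucketSinkConfiguration: { Destination: 'arn:aws:s3:::hypothetical-concat-bucket' },
   *     }],
   *   }).promise();
   */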
  export interface CreateMediaInsightsPipelineConfigurationResponse {
    /**
     * The configuration settings for the media insights pipeline.
     */
    MediaInsightsPipelineConfiguration?: MediaInsightsPipelineConfiguration;
  }
  export interface CreateMediaInsightsPipelineRequest {
    /**
     * The ARN of the pipeline's configuration.
     */
    MediaInsightsPipelineConfigurationArn: Arn;
    /**
     * The runtime configuration for the Kinesis video stream source of the media insights pipeline.
     */
    KinesisVideoStreamSourceRuntimeConfiguration?: KinesisVideoStreamSourceRuntimeConfiguration;
    /**
     * The runtime metadata for the media insights pipeline. Consists of a key-value map of strings.
     */
    MediaInsightsRuntimeMetadata?: MediaInsightsRuntimeMetadata;
    /**
     * The runtime configuration for the Kinesis video recording stream source.
     */
    KinesisVideoStreamRecordingSourceRuntimeConfiguration?: KinesisVideoStreamRecordingSourceRuntimeConfiguration;
    /**
     * The runtime configuration for the S3 recording sink. If specified, the settings in this structure override any settings in S3RecordingSinkConfiguration.
     */
    S3RecordingSinkRuntimeConfiguration?: S3RecordingSinkRuntimeConfiguration;
    /**
     * The tags assigned to the media insights pipeline.
     */
    Tags?: TagList;
    /**
     * The unique identifier for the media insights pipeline request.
     */
    ClientRequestToken?: ClientRequestToken;
  }
  export interface CreateMediaInsightsPipelineResponse {
    /**
     * The media insights pipeline object.
     */
    MediaInsightsPipeline: MediaInsightsPipeline;
  }
  export interface CreateMediaLiveConnectorPipelineRequest {
    /**
     * The media live connector pipeline's data sources.
     */
    Sources: LiveConnectorSourceList;
    /**
     * The media live connector pipeline's data sinks.
     */
    Sinks: LiveConnectorSinkList;
    /**
     * The token assigned to the client making the request.
     */
    ClientRequestToken?: ClientRequestToken;
    /**
     * The tags associated with the media live connector pipeline.
     */
    Tags?: TagList;
  }
  export interface CreateMediaLiveConnectorPipelineResponse {
    /**
     * The new media live connector pipeline.
     */
    MediaLiveConnectorPipeline?: MediaLiveConnectorPipeline;
  }
  export interface CreateMediaPipelineKinesisVideoStreamPoolRequest {
    /**
     * The configuration settings for the stream.
     */
    StreamConfiguration: KinesisVideoStreamConfiguration;
    /**
     * The name of the pool.
     */
    PoolName: KinesisVideoStreamPoolName;
    /**
     * The token assigned to the client making the request.
     */
    ClientRequestToken?: ClientRequestToken;
    /**
     * The tags assigned to the stream pool.
     */
    Tags?: TagList;
  }
  export interface CreateMediaPipelineKinesisVideoStreamPoolResponse {
    /**
     * The configuration for applying the streams to the pool.
     */
    KinesisVideoStreamPoolConfiguration?: KinesisVideoStreamPoolConfiguration;
  }
  export interface CreateMediaStreamPipelineRequest {
    /**
     * The data sources for the media pipeline.
     */
    Sources: MediaStreamSourceList;
    /**
     * The data sink for the media pipeline.
     */
    Sinks: MediaStreamSinkList;
    /**
     * The token assigned to the client making the request.
     */
    ClientRequestToken?: ClientRequestToken;
    /**
     * The tags assigned to the media pipeline.
     */
    Tags?: TagList;
  }
  export interface CreateMediaStreamPipelineResponse {
    /**
     * The requested media pipeline.
     */
    MediaStreamPipeline?: MediaStreamPipeline;
  }
  export interface DataChannelConcatenationConfiguration {
    /**
     * Enables or disables the configuration object.
     */
    State: ArtifactsConcatenationState;
  }
  export type DataRetentionChangeInHours = number;
  export type DataRetentionInHours = number;
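  /*
   * Sketch of launching a media insights pipeline from an existing
   * configuration (illustrative; the KVS source runtime configuration shape is
   * defined later in this file, and the ARNs, channel count, and sample rate
   * are hypothetical values).
   *
   *   await pipelines.createMediaInsightsPipeline({
   *     MediaInsightsPipelineConfigurationArn:
   *       'arn:aws:chime:us-east-1:111122223333:media-insights-pipeline-configuration/hypothetical',
   *     KinesisVideoStreamSourceRuntimeConfiguration: {
   *       Streams: [{
   *         StreamArn: 'arn:aws:kinesisvideo:us-east-1:111122223333:stream/hypothetical/1',
   *         StreamChannelDefinition: { NumberOfChannels: 1 },
   *       }],
   *       MediaEncoding: 'pcm',
   *       MediaSampleRate: 16000,
   *     },
   *   }).promise();
   */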
  export interface DeleteMediaCapturePipelineRequest {
    /**
     * The ID of the media pipeline being deleted.
     */
    MediaPipelineId: GuidString;
  }
  export interface DeleteMediaInsightsPipelineConfigurationRequest {
    /**
     * The unique identifier of the resource to be deleted. Valid values include the name and ARN of the media insights pipeline configuration.
     */
    Identifier: NonEmptyString;
  }
  export interface DeleteMediaPipelineKinesisVideoStreamPoolRequest {
    /**
     * The unique identifier of the requested resource. Valid values include the name and ARN of the media pipeline Kinesis Video Stream pool.
     */
    Identifier: NonEmptyString;
  }
  export interface DeleteMediaPipelineRequest {
    /**
     * The ID of the media pipeline to delete.
     */
    MediaPipelineId: GuidString;
  }
  export type ExternalUserIdList = ExternalUserIdType[];
  export type ExternalUserIdType = string;
  export type FragmentNumberString = string;
  export interface FragmentSelector {
    /**
     * The origin of the timestamps to use, Server or Producer. For more information, see StartSelectorType in the Amazon Kinesis Video Streams Developer Guide.
     */
    FragmentSelectorType: FragmentSelectorType;
    /**
     * The range of timestamps to return.
     */
    TimestampRange: TimestampRange;
  }
  export type FragmentSelectorType = "ProducerTimestamp"|"ServerTimestamp"|string;
  export interface GetMediaCapturePipelineRequest {
    /**
     * The ID of the pipeline that you want to get.
     */
    MediaPipelineId: GuidString;
  }
  export interface GetMediaCapturePipelineResponse {
    /**
     * The media pipeline object.
     */
    MediaCapturePipeline?: MediaCapturePipeline;
  }
  export interface GetMediaInsightsPipelineConfigurationRequest {
    /**
     * The unique identifier of the requested resource. Valid values include the name and ARN of the media insights pipeline configuration.
     */
    Identifier: NonEmptyString;
  }
  export interface GetMediaInsightsPipelineConfigurationResponse {
    /**
     * The requested media insights pipeline configuration.
     */
    MediaInsightsPipelineConfiguration?: MediaInsightsPipelineConfiguration;
  }
  export interface GetMediaPipelineKinesisVideoStreamPoolRequest {
    /**
     * The unique identifier of the requested resource. Valid values include the name and ARN of the media pipeline Kinesis Video Stream pool.
     */
    Identifier: NonEmptyString;
  }
  export interface GetMediaPipelineKinesisVideoStreamPoolResponse {
    /**
     * The video stream pool configuration object.
     */
    KinesisVideoStreamPoolConfiguration?: KinesisVideoStreamPoolConfiguration;
  }
  export interface GetMediaPipelineRequest {
    /**
     * The ID of the pipeline that you want to get.
     */
    MediaPipelineId: GuidString;
  }
  export interface GetMediaPipelineResponse {
    /**
     * The media pipeline object.
     */
    MediaPipeline?: MediaPipeline;
  }
  export interface GetSpeakerSearchTaskRequest {
    /**
     * The unique identifier of the requested resource. Valid values include the ID and ARN of the media insights pipeline.
     */
    Identifier: NonEmpty