universal-s3

Universal S3 SDK for JavaScript, available for Node.js backends

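The declaration file below describes an aws-sdk-style MediaLive client bundled with this package. As a quick orientation, here is a minimal usage sketch; it is illustrative only: the require path mirrors the stock aws-sdk layout, and the region and ChannelId values are hypothetical, since the declarations below define the types but not how the package wires them up.

// Minimal sketch, not package documentation. The import path and the IDs
// below are assumptions for illustration.
import MediaLive = require('aws-sdk/clients/medialive'); // assumed aws-sdk-style path

const medialive = new MediaLive({ region: 'us-east-1' });

// Callback style: each operation takes (params, callback) and returns a Request.
medialive.describeChannel({ ChannelId: '1234567' }, (err, data) => {
  if (err) {
    console.error('describeChannel failed:', err);
    return;
  }
  console.log('Channel state:', data.State);
});

// Promise style via the returned Request, using the channelRunning waiter
// declared below (it polls describeChannel every 5 seconds, up to 120 times).
medialive.startChannel({ ChannelId: '1234567' }).promise()
  .then(() => medialive.waitFor('channelRunning', { ChannelId: '1234567' }).promise())
  .then((data) => console.log('Channel is running:', data.State))
  .catch((err) => console.error(err));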
import {Request} from '../lib/request'; import {Response} from '../lib/response'; import {AWSError} from '../lib/error'; import {Service} from '../lib/service'; import {WaiterConfiguration} from '../lib/service'; import {ServiceConfigurationOptions} from '../lib/service'; import {ConfigBase as Config} from '../lib/config'; interface Blob {} declare class MediaLive extends Service { /** * Constructs a service object. This object has one method for each API operation. */ constructor(options?: MediaLive.Types.ClientConfiguration) config: Config & MediaLive.Types.ClientConfiguration; /** * Update a channel schedule */ batchUpdateSchedule(params: MediaLive.Types.BatchUpdateScheduleRequest, callback?: (err: AWSError, data: MediaLive.Types.BatchUpdateScheduleResponse) => void): Request<MediaLive.Types.BatchUpdateScheduleResponse, AWSError>; /** * Update a channel schedule */ batchUpdateSchedule(callback?: (err: AWSError, data: MediaLive.Types.BatchUpdateScheduleResponse) => void): Request<MediaLive.Types.BatchUpdateScheduleResponse, AWSError>; /** * Creates a new channel */ createChannel(params: MediaLive.Types.CreateChannelRequest, callback?: (err: AWSError, data: MediaLive.Types.CreateChannelResponse) => void): Request<MediaLive.Types.CreateChannelResponse, AWSError>; /** * Creates a new channel */ createChannel(callback?: (err: AWSError, data: MediaLive.Types.CreateChannelResponse) => void): Request<MediaLive.Types.CreateChannelResponse, AWSError>; /** * Create an input */ createInput(params: MediaLive.Types.CreateInputRequest, callback?: (err: AWSError, data: MediaLive.Types.CreateInputResponse) => void): Request<MediaLive.Types.CreateInputResponse, AWSError>; /** * Create an input */ createInput(callback?: (err: AWSError, data: MediaLive.Types.CreateInputResponse) => void): Request<MediaLive.Types.CreateInputResponse, AWSError>; /** * Creates an Input Security Group */ createInputSecurityGroup(params: MediaLive.Types.CreateInputSecurityGroupRequest, callback?: (err: AWSError, data: MediaLive.Types.CreateInputSecurityGroupResponse) => void): Request<MediaLive.Types.CreateInputSecurityGroupResponse, AWSError>; /** * Creates an Input Security Group */ createInputSecurityGroup(callback?: (err: AWSError, data: MediaLive.Types.CreateInputSecurityGroupResponse) => void): Request<MediaLive.Types.CreateInputSecurityGroupResponse, AWSError>; /** * Create tags for a resource */ createTags(params: MediaLive.Types.CreateTagsRequest, callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>; /** * Create tags for a resource */ createTags(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>; /** * Starts deletion of channel. The associated outputs are also deleted. */ deleteChannel(params: MediaLive.Types.DeleteChannelRequest, callback?: (err: AWSError, data: MediaLive.Types.DeleteChannelResponse) => void): Request<MediaLive.Types.DeleteChannelResponse, AWSError>; /** * Starts deletion of channel. The associated outputs are also deleted. 
*/ deleteChannel(callback?: (err: AWSError, data: MediaLive.Types.DeleteChannelResponse) => void): Request<MediaLive.Types.DeleteChannelResponse, AWSError>; /** * Deletes the input end point */ deleteInput(params: MediaLive.Types.DeleteInputRequest, callback?: (err: AWSError, data: MediaLive.Types.DeleteInputResponse) => void): Request<MediaLive.Types.DeleteInputResponse, AWSError>; /** * Deletes the input end point */ deleteInput(callback?: (err: AWSError, data: MediaLive.Types.DeleteInputResponse) => void): Request<MediaLive.Types.DeleteInputResponse, AWSError>; /** * Deletes an Input Security Group */ deleteInputSecurityGroup(params: MediaLive.Types.DeleteInputSecurityGroupRequest, callback?: (err: AWSError, data: MediaLive.Types.DeleteInputSecurityGroupResponse) => void): Request<MediaLive.Types.DeleteInputSecurityGroupResponse, AWSError>; /** * Deletes an Input Security Group */ deleteInputSecurityGroup(callback?: (err: AWSError, data: MediaLive.Types.DeleteInputSecurityGroupResponse) => void): Request<MediaLive.Types.DeleteInputSecurityGroupResponse, AWSError>; /** * Delete an expired reservation. */ deleteReservation(params: MediaLive.Types.DeleteReservationRequest, callback?: (err: AWSError, data: MediaLive.Types.DeleteReservationResponse) => void): Request<MediaLive.Types.DeleteReservationResponse, AWSError>; /** * Delete an expired reservation. */ deleteReservation(callback?: (err: AWSError, data: MediaLive.Types.DeleteReservationResponse) => void): Request<MediaLive.Types.DeleteReservationResponse, AWSError>; /** * Delete all schedule actions on a channel. */ deleteSchedule(params: MediaLive.Types.DeleteScheduleRequest, callback?: (err: AWSError, data: MediaLive.Types.DeleteScheduleResponse) => void): Request<MediaLive.Types.DeleteScheduleResponse, AWSError>; /** * Delete all schedule actions on a channel. 
*/ deleteSchedule(callback?: (err: AWSError, data: MediaLive.Types.DeleteScheduleResponse) => void): Request<MediaLive.Types.DeleteScheduleResponse, AWSError>; /** * Removes tags for a resource */ deleteTags(params: MediaLive.Types.DeleteTagsRequest, callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>; /** * Removes tags for a resource */ deleteTags(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>; /** * Gets details about a channel */ describeChannel(params: MediaLive.Types.DescribeChannelRequest, callback?: (err: AWSError, data: MediaLive.Types.DescribeChannelResponse) => void): Request<MediaLive.Types.DescribeChannelResponse, AWSError>; /** * Gets details about a channel */ describeChannel(callback?: (err: AWSError, data: MediaLive.Types.DescribeChannelResponse) => void): Request<MediaLive.Types.DescribeChannelResponse, AWSError>; /** * Produces details about an input */ describeInput(params: MediaLive.Types.DescribeInputRequest, callback?: (err: AWSError, data: MediaLive.Types.DescribeInputResponse) => void): Request<MediaLive.Types.DescribeInputResponse, AWSError>; /** * Produces details about an input */ describeInput(callback?: (err: AWSError, data: MediaLive.Types.DescribeInputResponse) => void): Request<MediaLive.Types.DescribeInputResponse, AWSError>; /** * Produces a summary of an Input Security Group */ describeInputSecurityGroup(params: MediaLive.Types.DescribeInputSecurityGroupRequest, callback?: (err: AWSError, data: MediaLive.Types.DescribeInputSecurityGroupResponse) => void): Request<MediaLive.Types.DescribeInputSecurityGroupResponse, AWSError>; /** * Produces a summary of an Input Security Group */ describeInputSecurityGroup(callback?: (err: AWSError, data: MediaLive.Types.DescribeInputSecurityGroupResponse) => void): Request<MediaLive.Types.DescribeInputSecurityGroupResponse, AWSError>; /** * Get details for an offering. */ describeOffering(params: MediaLive.Types.DescribeOfferingRequest, callback?: (err: AWSError, data: MediaLive.Types.DescribeOfferingResponse) => void): Request<MediaLive.Types.DescribeOfferingResponse, AWSError>; /** * Get details for an offering. */ describeOffering(callback?: (err: AWSError, data: MediaLive.Types.DescribeOfferingResponse) => void): Request<MediaLive.Types.DescribeOfferingResponse, AWSError>; /** * Get details for a reservation. */ describeReservation(params: MediaLive.Types.DescribeReservationRequest, callback?: (err: AWSError, data: MediaLive.Types.DescribeReservationResponse) => void): Request<MediaLive.Types.DescribeReservationResponse, AWSError>; /** * Get details for a reservation. 
*/ describeReservation(callback?: (err: AWSError, data: MediaLive.Types.DescribeReservationResponse) => void): Request<MediaLive.Types.DescribeReservationResponse, AWSError>; /** * Get a channel schedule */ describeSchedule(params: MediaLive.Types.DescribeScheduleRequest, callback?: (err: AWSError, data: MediaLive.Types.DescribeScheduleResponse) => void): Request<MediaLive.Types.DescribeScheduleResponse, AWSError>; /** * Get a channel schedule */ describeSchedule(callback?: (err: AWSError, data: MediaLive.Types.DescribeScheduleResponse) => void): Request<MediaLive.Types.DescribeScheduleResponse, AWSError>; /** * Produces list of channels that have been created */ listChannels(params: MediaLive.Types.ListChannelsRequest, callback?: (err: AWSError, data: MediaLive.Types.ListChannelsResponse) => void): Request<MediaLive.Types.ListChannelsResponse, AWSError>; /** * Produces list of channels that have been created */ listChannels(callback?: (err: AWSError, data: MediaLive.Types.ListChannelsResponse) => void): Request<MediaLive.Types.ListChannelsResponse, AWSError>; /** * Produces a list of Input Security Groups for an account */ listInputSecurityGroups(params: MediaLive.Types.ListInputSecurityGroupsRequest, callback?: (err: AWSError, data: MediaLive.Types.ListInputSecurityGroupsResponse) => void): Request<MediaLive.Types.ListInputSecurityGroupsResponse, AWSError>; /** * Produces a list of Input Security Groups for an account */ listInputSecurityGroups(callback?: (err: AWSError, data: MediaLive.Types.ListInputSecurityGroupsResponse) => void): Request<MediaLive.Types.ListInputSecurityGroupsResponse, AWSError>; /** * Produces list of inputs that have been created */ listInputs(params: MediaLive.Types.ListInputsRequest, callback?: (err: AWSError, data: MediaLive.Types.ListInputsResponse) => void): Request<MediaLive.Types.ListInputsResponse, AWSError>; /** * Produces list of inputs that have been created */ listInputs(callback?: (err: AWSError, data: MediaLive.Types.ListInputsResponse) => void): Request<MediaLive.Types.ListInputsResponse, AWSError>; /** * List offerings available for purchase. */ listOfferings(params: MediaLive.Types.ListOfferingsRequest, callback?: (err: AWSError, data: MediaLive.Types.ListOfferingsResponse) => void): Request<MediaLive.Types.ListOfferingsResponse, AWSError>; /** * List offerings available for purchase. */ listOfferings(callback?: (err: AWSError, data: MediaLive.Types.ListOfferingsResponse) => void): Request<MediaLive.Types.ListOfferingsResponse, AWSError>; /** * List purchased reservations. */ listReservations(params: MediaLive.Types.ListReservationsRequest, callback?: (err: AWSError, data: MediaLive.Types.ListReservationsResponse) => void): Request<MediaLive.Types.ListReservationsResponse, AWSError>; /** * List purchased reservations. 
*/ listReservations(callback?: (err: AWSError, data: MediaLive.Types.ListReservationsResponse) => void): Request<MediaLive.Types.ListReservationsResponse, AWSError>; /** * Produces list of tags that have been created for a resource */ listTagsForResource(params: MediaLive.Types.ListTagsForResourceRequest, callback?: (err: AWSError, data: MediaLive.Types.ListTagsForResourceResponse) => void): Request<MediaLive.Types.ListTagsForResourceResponse, AWSError>; /** * Produces list of tags that have been created for a resource */ listTagsForResource(callback?: (err: AWSError, data: MediaLive.Types.ListTagsForResourceResponse) => void): Request<MediaLive.Types.ListTagsForResourceResponse, AWSError>; /** * Purchase an offering and create a reservation. */ purchaseOffering(params: MediaLive.Types.PurchaseOfferingRequest, callback?: (err: AWSError, data: MediaLive.Types.PurchaseOfferingResponse) => void): Request<MediaLive.Types.PurchaseOfferingResponse, AWSError>; /** * Purchase an offering and create a reservation. */ purchaseOffering(callback?: (err: AWSError, data: MediaLive.Types.PurchaseOfferingResponse) => void): Request<MediaLive.Types.PurchaseOfferingResponse, AWSError>; /** * Starts an existing channel */ startChannel(params: MediaLive.Types.StartChannelRequest, callback?: (err: AWSError, data: MediaLive.Types.StartChannelResponse) => void): Request<MediaLive.Types.StartChannelResponse, AWSError>; /** * Starts an existing channel */ startChannel(callback?: (err: AWSError, data: MediaLive.Types.StartChannelResponse) => void): Request<MediaLive.Types.StartChannelResponse, AWSError>; /** * Stops a running channel */ stopChannel(params: MediaLive.Types.StopChannelRequest, callback?: (err: AWSError, data: MediaLive.Types.StopChannelResponse) => void): Request<MediaLive.Types.StopChannelResponse, AWSError>; /** * Stops a running channel */ stopChannel(callback?: (err: AWSError, data: MediaLive.Types.StopChannelResponse) => void): Request<MediaLive.Types.StopChannelResponse, AWSError>; /** * Updates a channel. */ updateChannel(params: MediaLive.Types.UpdateChannelRequest, callback?: (err: AWSError, data: MediaLive.Types.UpdateChannelResponse) => void): Request<MediaLive.Types.UpdateChannelResponse, AWSError>; /** * Updates a channel. */ updateChannel(callback?: (err: AWSError, data: MediaLive.Types.UpdateChannelResponse) => void): Request<MediaLive.Types.UpdateChannelResponse, AWSError>; /** * Changes the class of the channel. */ updateChannelClass(params: MediaLive.Types.UpdateChannelClassRequest, callback?: (err: AWSError, data: MediaLive.Types.UpdateChannelClassResponse) => void): Request<MediaLive.Types.UpdateChannelClassResponse, AWSError>; /** * Changes the class of the channel. */ updateChannelClass(callback?: (err: AWSError, data: MediaLive.Types.UpdateChannelClassResponse) => void): Request<MediaLive.Types.UpdateChannelClassResponse, AWSError>; /** * Updates an input. */ updateInput(params: MediaLive.Types.UpdateInputRequest, callback?: (err: AWSError, data: MediaLive.Types.UpdateInputResponse) => void): Request<MediaLive.Types.UpdateInputResponse, AWSError>; /** * Updates an input. */ updateInput(callback?: (err: AWSError, data: MediaLive.Types.UpdateInputResponse) => void): Request<MediaLive.Types.UpdateInputResponse, AWSError>; /** * Update an Input Security Group's Whitelists. 
*/ updateInputSecurityGroup(params: MediaLive.Types.UpdateInputSecurityGroupRequest, callback?: (err: AWSError, data: MediaLive.Types.UpdateInputSecurityGroupResponse) => void): Request<MediaLive.Types.UpdateInputSecurityGroupResponse, AWSError>; /** * Update an Input Security Group's Whitelists. */ updateInputSecurityGroup(callback?: (err: AWSError, data: MediaLive.Types.UpdateInputSecurityGroupResponse) => void): Request<MediaLive.Types.UpdateInputSecurityGroupResponse, AWSError>; /** * Update reservation. */ updateReservation(params: MediaLive.Types.UpdateReservationRequest, callback?: (err: AWSError, data: MediaLive.Types.UpdateReservationResponse) => void): Request<MediaLive.Types.UpdateReservationResponse, AWSError>; /** * Update reservation. */ updateReservation(callback?: (err: AWSError, data: MediaLive.Types.UpdateReservationResponse) => void): Request<MediaLive.Types.UpdateReservationResponse, AWSError>; /** * Waits for the channelCreated state by periodically calling the underlying MediaLive.describeChannel operation every 3 seconds (at most 5 times). Wait until a channel has been created */ waitFor(state: "channelCreated", params: MediaLive.Types.DescribeChannelRequest & {$waiter?: WaiterConfiguration}, callback?: (err: AWSError, data: MediaLive.Types.DescribeChannelResponse) => void): Request<MediaLive.Types.DescribeChannelResponse, AWSError>; /** * Waits for the channelCreated state by periodically calling the underlying MediaLive.describeChannel operation every 3 seconds (at most 5 times). Wait until a channel has been created */ waitFor(state: "channelCreated", callback?: (err: AWSError, data: MediaLive.Types.DescribeChannelResponse) => void): Request<MediaLive.Types.DescribeChannelResponse, AWSError>; /** * Waits for the channelRunning state by periodically calling the underlying MediaLive.describeChannel operation every 5 seconds (at most 120 times). Wait until a channel is running */ waitFor(state: "channelRunning", params: MediaLive.Types.DescribeChannelRequest & {$waiter?: WaiterConfiguration}, callback?: (err: AWSError, data: MediaLive.Types.DescribeChannelResponse) => void): Request<MediaLive.Types.DescribeChannelResponse, AWSError>; /** * Waits for the channelRunning state by periodically calling the underlying MediaLive.describeChannel operation every 5 seconds (at most 120 times). Wait until a channel is running */ waitFor(state: "channelRunning", callback?: (err: AWSError, data: MediaLive.Types.DescribeChannelResponse) => void): Request<MediaLive.Types.DescribeChannelResponse, AWSError>; /** * Waits for the channelStopped state by periodically calling the underlying MediaLive.describeChannel operation every 5 seconds (at most 28 times). Wait until a channel is stopped */ waitFor(state: "channelStopped", params: MediaLive.Types.DescribeChannelRequest & {$waiter?: WaiterConfiguration}, callback?: (err: AWSError, data: MediaLive.Types.DescribeChannelResponse) => void): Request<MediaLive.Types.DescribeChannelResponse, AWSError>; /** * Waits for the channelStopped state by periodically calling the underlying MediaLive.describeChannel operation every 5 seconds (at most 28 times). Wait until a channel is stopped */ waitFor(state: "channelStopped", callback?: (err: AWSError, data: MediaLive.Types.DescribeChannelResponse) => void): Request<MediaLive.Types.DescribeChannelResponse, AWSError>; /** * Waits for the channelDeleted state by periodically calling the underlying MediaLive.describeChannel operation every 5 seconds (at most 20 times). 
Wait until a channel has been deleted */ waitFor(state: "channelDeleted", params: MediaLive.Types.DescribeChannelRequest & {$waiter?: WaiterConfiguration}, callback?: (err: AWSError, data: MediaLive.Types.DescribeChannelResponse) => void): Request<MediaLive.Types.DescribeChannelResponse, AWSError>; /** * Waits for the channelDeleted state by periodically calling the underlying MediaLive.describeChannel operation every 5 seconds (at most 20 times). Wait until a channel has been deleted */ waitFor(state: "channelDeleted", callback?: (err: AWSError, data: MediaLive.Types.DescribeChannelResponse) => void): Request<MediaLive.Types.DescribeChannelResponse, AWSError>; } declare namespace MediaLive { export type AacCodingMode = "AD_RECEIVER_MIX"|"CODING_MODE_1_0"|"CODING_MODE_1_1"|"CODING_MODE_2_0"|"CODING_MODE_5_1"|string; export type AacInputType = "BROADCASTER_MIXED_AD"|"NORMAL"|string; export type AacProfile = "HEV1"|"HEV2"|"LC"|string; export type AacRateControlMode = "CBR"|"VBR"|string; export type AacRawFormat = "LATM_LOAS"|"NONE"|string; export interface AacSettings { /** * Average bitrate in bits/second. Valid values depend on rate control mode and profile. */ Bitrate?: __double; /** * Mono, Stereo, or 5.1 channel layout. Valid values depend on rate control mode and profile. The adReceiverMix setting receives a stereo description plus control track and emits a mono AAC encode of the description track, with control data emitted in the PES header as per ETSI TS 101 154 Annex E. */ CodingMode?: AacCodingMode; /** * Set to "broadcasterMixedAd" when input contains pre-mixed main audio + AD (narration) as a stereo pair. The Audio Type field (audioType) will be set to 3, which signals to downstream systems that this stream contains "broadcaster mixed AD". Note that the input received by the encoder must contain pre-mixed audio; the encoder does not perform the mixing. The values in audioTypeControl and audioType (in AudioDescription) are ignored when set to broadcasterMixedAd. Leave set to "normal" when input does not contain pre-mixed audio + AD. */ InputType?: AacInputType; /** * AAC Profile. */ Profile?: AacProfile; /** * Rate Control Mode. */ RateControlMode?: AacRateControlMode; /** * Sets LATM / LOAS AAC output for raw containers. */ RawFormat?: AacRawFormat; /** * Sample rate in Hz. Valid values depend on rate control mode and profile. */ SampleRate?: __double; /** * Use MPEG-2 AAC audio instead of MPEG-4 AAC audio for raw or MPEG-2 Transport Stream containers. */ Spec?: AacSpec; /** * VBR Quality Level - Only used if rateControlMode is VBR. */ VbrQuality?: AacVbrQuality; } export type AacSpec = "MPEG2"|"MPEG4"|string; export type AacVbrQuality = "HIGH"|"LOW"|"MEDIUM_HIGH"|"MEDIUM_LOW"|string; export type Ac3BitstreamMode = "COMMENTARY"|"COMPLETE_MAIN"|"DIALOGUE"|"EMERGENCY"|"HEARING_IMPAIRED"|"MUSIC_AND_EFFECTS"|"VISUALLY_IMPAIRED"|"VOICE_OVER"|string; export type Ac3CodingMode = "CODING_MODE_1_0"|"CODING_MODE_1_1"|"CODING_MODE_2_0"|"CODING_MODE_3_2_LFE"|string; export type Ac3DrcProfile = "FILM_STANDARD"|"NONE"|string; export type Ac3LfeFilter = "DISABLED"|"ENABLED"|string; export type Ac3MetadataControl = "FOLLOW_INPUT"|"USE_CONFIGURED"|string; export interface Ac3Settings { /** * Average bitrate in bits/second. Valid bitrates depend on the coding mode. */ Bitrate?: __double; /** * Specifies the bitstream mode (bsmod) for the emitted AC-3 stream. See ATSC A/52-2012 for background on these values. */ BitstreamMode?: Ac3BitstreamMode; /** * Dolby Digital coding mode. 
Determines number of channels. */ CodingMode?: Ac3CodingMode; /** * Sets the dialnorm for the output. If excluded and input audio is Dolby Digital, dialnorm will be passed through. */ Dialnorm?: __integerMin1Max31; /** * If set to filmStandard, adds dynamic range compression signaling to the output bitstream as defined in the Dolby Digital specification. */ DrcProfile?: Ac3DrcProfile; /** * When set to enabled, applies a 120Hz lowpass filter to the LFE channel prior to encoding. Only valid in codingMode32Lfe mode. */ LfeFilter?: Ac3LfeFilter; /** * When set to "followInput", encoder metadata will be sourced from the DD, DD+, or DolbyE decoder that supplied this audio data. If audio was not supplied from one of these streams, then the static metadata settings will be used. */ MetadataControl?: Ac3MetadataControl; } export type AfdSignaling = "AUTO"|"FIXED"|"NONE"|string; export interface ArchiveContainerSettings { M2tsSettings?: M2tsSettings; } export interface ArchiveGroupSettings { /** * A directory and base filename where archive files should be written. */ Destination: OutputLocationRef; /** * Number of seconds to write to archive file before closing and starting a new one. */ RolloverInterval?: __integerMin1; } export interface ArchiveOutputSettings { /** * Settings specific to the container type of the file. */ ContainerSettings: ArchiveContainerSettings; /** * Output file extension. If excluded, this will be auto-selected from the container type. */ Extension?: __string; /** * String concatenated to the end of the destination filename. Required for multiple outputs of the same type. */ NameModifier?: __string; } export interface AribDestinationSettings { } export interface AribSourceSettings { } export interface AudioChannelMapping { /** * Indices and gain values for each input channel that should be remixed into this output channel. */ InputChannelLevels: __listOfInputChannelLevel; /** * The index of the output channel being produced. */ OutputChannel: __integerMin0Max7; } export interface AudioCodecSettings { AacSettings?: AacSettings; Ac3Settings?: Ac3Settings; Eac3Settings?: Eac3Settings; Mp2Settings?: Mp2Settings; PassThroughSettings?: PassThroughSettings; } export interface AudioDescription { /** * Advanced audio normalization settings. */ AudioNormalizationSettings?: AudioNormalizationSettings; /** * The name of the AudioSelector used as the source for this AudioDescription. */ AudioSelectorName: __string; /** * Applies only if audioTypeControl is useConfigured. The values for audioType are defined in ISO-IEC 13818-1. */ AudioType?: AudioType; /** * Determines how audio type is determined. followInput: If the input contains an ISO 639 audioType, then that value is passed through to the output. If the input contains no ISO 639 audioType, the value in Audio Type is included in the output. useConfigured: The value in Audio Type is included in the output. Note that this field and audioType are both ignored if inputType is broadcasterMixedAd. */ AudioTypeControl?: AudioDescriptionAudioTypeControl; /** * Audio codec settings. */ CodecSettings?: AudioCodecSettings; /** * Indicates the language of the audio output track. Only used if languageControlMode is useConfigured, or there is no ISO 639 language code specified in the input. */ LanguageCode?: __stringMin3Max3; /** * Choosing followInput will cause the ISO 639 language code of the output to follow the ISO 639 language code of the input. 
The languageCode will be used when useConfigured is set, or when followInput is selected but there is no ISO 639 language code specified by the input. */ LanguageCodeControl?: AudioDescriptionLanguageCodeControl; /** * The name of this AudioDescription. Outputs will use this name to uniquely identify this AudioDescription. Description names should be unique within this Live Event. */ Name: __string; /** * Settings that control how input audio channels are remixed into the output audio channels. */ RemixSettings?: RemixSettings; /** * Used for MS Smooth and Apple HLS outputs. Indicates the name displayed by the player (eg. English, or Director Commentary). */ StreamName?: __string; } export type AudioDescriptionAudioTypeControl = "FOLLOW_INPUT"|"USE_CONFIGURED"|string; export type AudioDescriptionLanguageCodeControl = "FOLLOW_INPUT"|"USE_CONFIGURED"|string; export interface AudioLanguageSelection { /** * Selects a specific three-letter language code from within an audio source. */ LanguageCode: __string; /** * When set to "strict", the transport stream demux strictly identifies audio streams by their language descriptor. If a PMT update occurs such that an audio stream matching the initially selected language is no longer present then mute will be encoded until the language returns. If "loose", then on a PMT update the demux will choose another audio stream in the program with the same stream type if it can't find one with the same language. */ LanguageSelectionPolicy?: AudioLanguageSelectionPolicy; } export type AudioLanguageSelectionPolicy = "LOOSE"|"STRICT"|string; export type AudioNormalizationAlgorithm = "ITU_1770_1"|"ITU_1770_2"|string; export type AudioNormalizationAlgorithmControl = "CORRECT_AUDIO"|string; export interface AudioNormalizationSettings { /** * Audio normalization algorithm to use. itu17701 conforms to the CALM Act specification, itu17702 conforms to the EBU R-128 specification. */ Algorithm?: AudioNormalizationAlgorithm; /** * When set to correctAudio the output audio is corrected using the chosen algorithm. If set to measureOnly, the audio will be measured but not adjusted. */ AlgorithmControl?: AudioNormalizationAlgorithmControl; /** * Target LKFS(loudness) to adjust volume to. If no value is entered, a default value will be used according to the chosen algorithm. The CALM Act (1770-1) recommends a target of -24 LKFS. The EBU R-128 specification (1770-2) recommends a target of -23 LKFS. */ TargetLkfs?: __doubleMinNegative59Max0; } export interface AudioOnlyHlsSettings { /** * Specifies the group to which the audio Rendition belongs. */ AudioGroupId?: __string; /** * Optional. Specifies the .jpg or .png image to use as the cover art for an audio-only output. We recommend a low bit-size file because the image increases the output audio bandwidth. The image is attached to the audio as an ID3 tag, frame type APIC, picture type 0x10, as per the "ID3 tag version 2.4.0 - Native Frames" standard. */ AudioOnlyImage?: InputLocation; /** * Four types of audio-only tracks are supported: Audio-Only Variant Stream The client can play back this audio-only stream instead of video in low-bandwidth scenarios. Represented as an EXT-X-STREAM-INF in the HLS manifest. Alternate Audio, Auto Select, Default Alternate rendition that the client should try to play back by default. Represented as an EXT-X-MEDIA in the HLS manifest with DEFAULT=YES, AUTOSELECT=YES Alternate Audio, Auto Select, Not Default Alternate rendition that the client may try to play back by default. 
Represented as an EXT-X-MEDIA in the HLS manifest with DEFAULT=NO, AUTOSELECT=YES Alternate Audio, not Auto Select Alternate rendition that the client will not try to play back by default. Represented as an EXT-X-MEDIA in the HLS manifest with DEFAULT=NO, AUTOSELECT=NO */ AudioTrackType?: AudioOnlyHlsTrackType; } export type AudioOnlyHlsTrackType = "ALTERNATE_AUDIO_AUTO_SELECT"|"ALTERNATE_AUDIO_AUTO_SELECT_DEFAULT"|"ALTERNATE_AUDIO_NOT_AUTO_SELECT"|"AUDIO_ONLY_VARIANT_STREAM"|string; export interface AudioPidSelection { /** * Selects a specific PID from within a source. */ Pid: __integerMin0Max8191; } export interface AudioSelector { /** * The name of this AudioSelector. AudioDescriptions will use this name to uniquely identify this Selector. Selector names should be unique per input. */ Name: __stringMin1; /** * The audio selector settings. */ SelectorSettings?: AudioSelectorSettings; } export interface AudioSelectorSettings { AudioLanguageSelection?: AudioLanguageSelection; AudioPidSelection?: AudioPidSelection; } export type AudioType = "CLEAN_EFFECTS"|"HEARING_IMPAIRED"|"UNDEFINED"|"VISUAL_IMPAIRED_COMMENTARY"|string; export type AuthenticationScheme = "AKAMAI"|"COMMON"|string; export interface AvailBlanking { /** * Blanking image to be used. Leave empty for solid black. Only bmp and png images are supported. */ AvailBlankingImage?: InputLocation; /** * When set to enabled, causes video, audio and captions to be blanked when insertion metadata is added. */ State?: AvailBlankingState; } export type AvailBlankingState = "DISABLED"|"ENABLED"|string; export interface AvailConfiguration { /** * Ad avail settings. */ AvailSettings?: AvailSettings; } export interface AvailSettings { Scte35SpliceInsert?: Scte35SpliceInsert; Scte35TimeSignalApos?: Scte35TimeSignalApos; } export interface BatchScheduleActionCreateRequest { /** * A list of schedule actions to create. */ ScheduleActions: __listOfScheduleAction; } export interface BatchScheduleActionCreateResult { /** * List of actions that have been created in the schedule. */ ScheduleActions: __listOfScheduleAction; } export interface BatchScheduleActionDeleteRequest { /** * A list of schedule actions to delete. */ ActionNames: __listOf__string; } export interface BatchScheduleActionDeleteResult { /** * List of actions that have been deleted from the schedule. */ ScheduleActions: __listOfScheduleAction; } export interface BatchUpdateScheduleRequest { /** * Id of the channel whose schedule is being updated. */ ChannelId: __string; /** * Schedule actions to create in the schedule. */ Creates?: BatchScheduleActionCreateRequest; /** * Schedule actions to delete from the schedule. */ Deletes?: BatchScheduleActionDeleteRequest; } export interface BatchUpdateScheduleResponse { /** * Schedule actions created in the schedule. */ Creates?: BatchScheduleActionCreateResult; /** * Schedule actions deleted from the schedule. */ Deletes?: BatchScheduleActionDeleteResult; } export interface BlackoutSlate { /** * Blackout slate image to be used. Leave empty for solid black. Only bmp and png images are supported. */ BlackoutSlateImage?: InputLocation; /** * Setting to enabled causes the encoder to blackout the video, audio, and captions, and raise the "Network Blackout Image" slate when an SCTE104/35 Network End Segmentation Descriptor is encountered. The blackout will be lifted when the Network Start Segmentation Descriptor is encountered. The Network End and Network Start descriptors must contain a network ID that matches the value entered in "Network ID". 
*/ NetworkEndBlackout?: BlackoutSlateNetworkEndBlackout; /** * Path to local file to use as Network End Blackout image. Image will be scaled to fill the entire output raster. */ NetworkEndBlackoutImage?: InputLocation; /** * Provides Network ID that matches EIDR ID format (e.g., "10.XXXX/XXXX-XXXX-XXXX-XXXX-XXXX-C"). */ NetworkId?: __stringMin34Max34; /** * When set to enabled, causes video, audio and captions to be blanked when indicated by program metadata. */ State?: BlackoutSlateState; } export type BlackoutSlateNetworkEndBlackout = "DISABLED"|"ENABLED"|string; export type BlackoutSlateState = "DISABLED"|"ENABLED"|string; export type BurnInAlignment = "CENTERED"|"LEFT"|"SMART"|string; export type BurnInBackgroundColor = "BLACK"|"NONE"|"WHITE"|string; export interface BurnInDestinationSettings { /** * If no explicit xPosition or yPosition is provided, setting alignment to centered will place the captions at the bottom center of the output. Similarly, setting a left alignment will align captions to the bottom left of the output. If x and y positions are given in conjunction with the alignment parameter, the font will be justified (either left or centered) relative to those coordinates. Selecting "smart" justification will left-justify live subtitles and center-justify pre-recorded subtitles. All burn-in and DVB-Sub font settings must match. */ Alignment?: BurnInAlignment; /** * Specifies the color of the rectangle behind the captions. All burn-in and DVB-Sub font settings must match. */ BackgroundColor?: BurnInBackgroundColor; /** * Specifies the opacity of the background rectangle. 255 is opaque; 0 is transparent. Leaving this parameter out is equivalent to setting it to 0 (transparent). All burn-in and DVB-Sub font settings must match. */ BackgroundOpacity?: __integerMin0Max255; /** * External font file used for caption burn-in. File extension must be 'ttf' or 'tte'. Although the user can select output fonts for many different types of input captions, embedded, STL and teletext sources use a strict grid system. Using external fonts with these caption sources could cause unexpected display of proportional fonts. All burn-in and DVB-Sub font settings must match. */ Font?: InputLocation; /** * Specifies the color of the burned-in captions. This option is not valid for source captions that are STL, 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match. */ FontColor?: BurnInFontColor; /** * Specifies the opacity of the burned-in captions. 255 is opaque; 0 is transparent. All burn-in and DVB-Sub font settings must match. */ FontOpacity?: __integerMin0Max255; /** * Font resolution in DPI (dots per inch); default is 96 dpi. All burn-in and DVB-Sub font settings must match. */ FontResolution?: __integerMin96Max600; /** * When set to 'auto' fontSize will scale depending on the size of the output. Giving a positive integer will specify the exact font size in points. All burn-in and DVB-Sub font settings must match. */ FontSize?: __string; /** * Specifies font outline color. This option is not valid for source captions that are either 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match. */ OutlineColor?: BurnInOutlineColor; /** * Specifies font outline size in pixels. This option is not valid for source captions that are either 608/embedded or teletext. 
These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match. */ OutlineSize?: __integerMin0Max10; /** * Specifies the color of the shadow cast by the captions. All burn-in and DVB-Sub font settings must match. */ ShadowColor?: BurnInShadowColor; /** * Specifies the opacity of the shadow. 255 is opaque; 0 is transparent. Leaving this parameter out is equivalent to setting it to 0 (transparent). All burn-in and DVB-Sub font settings must match. */ ShadowOpacity?: __integerMin0Max255; /** * Specifies the horizontal offset of the shadow relative to the captions in pixels. A value of -2 would result in a shadow offset 2 pixels to the left. All burn-in and DVB-Sub font settings must match. */ ShadowXOffset?: __integer; /** * Specifies the vertical offset of the shadow relative to the captions in pixels. A value of -2 would result in a shadow offset 2 pixels above the text. All burn-in and DVB-Sub font settings must match. */ ShadowYOffset?: __integer; /** * Controls whether a fixed grid size will be used to generate the output subtitles bitmap. Only applicable for Teletext inputs and DVB-Sub/Burn-in outputs. */ TeletextGridControl?: BurnInTeletextGridControl; /** * Specifies the horizontal position of the caption relative to the left side of the output in pixels. A value of 10 would result in the captions starting 10 pixels from the left of the output. If no explicit xPosition is provided, the horizontal caption position will be determined by the alignment parameter. All burn-in and DVB-Sub font settings must match. */ XPosition?: __integerMin0; /** * Specifies the vertical position of the caption relative to the top of the output in pixels. A value of 10 would result in the captions starting 10 pixels from the top of the output. If no explicit yPosition is provided, the caption will be positioned towards the bottom of the output. All burn-in and DVB-Sub font settings must match. */ YPosition?: __integerMin0; } export type BurnInFontColor = "BLACK"|"BLUE"|"GREEN"|"RED"|"WHITE"|"YELLOW"|string; export type BurnInOutlineColor = "BLACK"|"BLUE"|"GREEN"|"RED"|"WHITE"|"YELLOW"|string; export type BurnInShadowColor = "BLACK"|"NONE"|"WHITE"|string; export type BurnInTeletextGridControl = "FIXED"|"SCALED"|string; export interface CaptionDescription { /** * Specifies which input caption selector to use as a caption source when generating output captions. This field should match a captionSelector name. */ CaptionSelectorName: __string; /** * Additional settings for captions destination that depend on the destination type. */ DestinationSettings?: CaptionDestinationSettings; /** * ISO 639-2 three-digit code: http://www.loc.gov/standards/iso639-2/ */ LanguageCode?: __string; /** * Human readable information to indicate captions available for players (eg. English, or Spanish). */ LanguageDescription?: __string; /** * Name of the caption description. Used to associate a caption description with an output. Names must be unique within an event. 
*/ Name: __string; } export interface CaptionDestinationSettings { AribDestinationSettings?: AribDestinationSettings; BurnInDestinationSettings?: BurnInDestinationSettings; DvbSubDestinationSettings?: DvbSubDestinationSettings; EmbeddedDestinationSettings?: EmbeddedDestinationSettings; EmbeddedPlusScte20DestinationSettings?: EmbeddedPlusScte20DestinationSettings; RtmpCaptionInfoDestinationSettings?: RtmpCaptionInfoDestinationSettings; Scte20PlusEmbeddedDestinationSettings?: Scte20PlusEmbeddedDestinationSettings; Scte27DestinationSettings?: Scte27DestinationSettings; SmpteTtDestinationSettings?: SmpteTtDestinationSettings; TeletextDestinationSettings?: TeletextDestinationSettings; TtmlDestinationSettings?: TtmlDestinationSettings; WebvttDestinationSettings?: WebvttDestinationSettings; } export interface CaptionLanguageMapping { /** * The closed caption channel being described by this CaptionLanguageMapping. Each channel mapping must have a unique channel number (maximum of 4) */ CaptionChannel: __integerMin1Max4; /** * Three character ISO 639-2 language code (see http://www.loc.gov/standards/iso639-2) */ LanguageCode: __stringMin3Max3; /** * Textual description of language */ LanguageDescription: __stringMin1; } export interface CaptionSelector { /** * When specified this field indicates the three letter language code of the caption track to extract from the source. */ LanguageCode?: __string; /** * Name identifier for a caption selector. This name is used to associate this caption selector with one or more caption descriptions. Names must be unique within an event. */ Name: __stringMin1; /** * Caption selector settings. */ SelectorSettings?: CaptionSelectorSettings; } export interface CaptionSelectorSettings { AribSourceSettings?: AribSourceSettings; DvbSubSourceSettings?: DvbSubSourceSettings; EmbeddedSourceSettings?: EmbeddedSourceSettings; Scte20SourceSettings?: Scte20SourceSettings; Scte27SourceSettings?: Scte27SourceSettings; TeletextSourceSettings?: TeletextSourceSettings; } export interface Channel { /** * The unique arn of the channel. */ Arn?: __string; /** * The class for this channel. STANDARD for a channel with two pipelines or SINGLE_PIPELINE for a channel with one pipeline. */ ChannelClass?: ChannelClass; /** * A list of destinations of the channel. For UDP outputs, there is one destination per output. For other types (HLS, for example), there is one destination per packager. */ Destinations?: __listOfOutputDestination; /** * The endpoints where outgoing connections initiate from */ EgressEndpoints?: __listOfChannelEgressEndpoint; EncoderSettings?: EncoderSettings; /** * The unique id of the channel. */ Id?: __string; /** * List of input attachments for channel. */ InputAttachments?: __listOfInputAttachment; InputSpecification?: InputSpecification; /** * The log level being written to CloudWatch Logs. */ LogLevel?: LogLevel; /** * The name of the channel. (user-mutable) */ Name?: __string; /** * Runtime details for the pipelines of a running channel. */ PipelineDetails?: __listOfPipelineDetail; /** * The number of currently healthy pipelines. */ PipelinesRunningCount?: __integer; /** * The Amazon Resource Name (ARN) of the role assumed when running the Channel. */ RoleArn?: __string; State?: ChannelState; /** * A collection of key-value pairs. 
*/ Tags?: Tags; } export type ChannelClass = "STANDARD"|"SINGLE_PIPELINE"|string; export interface ChannelEgressEndpoint { /** * Public IP of where a channel's output comes from */ SourceIp?: __string; } export type ChannelState = "CREATING"|"CREATE_FAILED"|"IDLE"|"STARTING"|"RUNNING"|"RECOVERING"|"STOPPING"|"DELETING"|"DELETED"|"UPDATING"|"UPDATE_FAILED"|string; export interface ChannelSummary { /** * The unique arn of the channel. */ Arn?: __string; /** * The class for this channel. STANDARD for a channel with two pipelines or SINGLE_PIPELINE for a channel with one pipeline. */ ChannelClass?: ChannelClass; /** * A list of destinations of the channel. For UDP outputs, there is one destination per output. For other types (HLS, for example), there is one destination per packager. */ Destinations?: __listOfOutputDestination; /** * The endpoints where outgoing connections initiate from */ EgressEndpoints?: __listOfChannelEgressEndpoint; /** * The unique id of the channel. */ Id?: __string; /** * List of input attachments for channel. */ InputAttachments?: __listOfInputAttachment; InputSpecification?: InputSpecification; /** * The log level being written to CloudWatch Logs. */ LogLevel?: LogLevel; /** * The name of the channel. (user-mutable) */ Name?: __string; /** * The number of currently healthy pipelines. */ PipelinesRunningCount?: __integer; /** * The Amazon Resource Name (ARN) of the role assumed when running the Channel. */ RoleArn?: __string; State?: ChannelState; /** * A collection of key-value pairs. */ Tags?: Tags; } export interface ColorSpacePassthroughSettings { } export interface CreateChannelRequest { /** * The class for this channel. STANDARD for a channel with two pipelines or SINGLE_PIPELINE for a channel with one pipeline. */ ChannelClass?: ChannelClass; Destinations?: __listOfOutputDestination; EncoderSettings?: EncoderSettings; /** * List of input attachments for channel. */ InputAttachments?: __listOfInputAttachment; /** * Specification of input for this channel (max. bitrate, resolution, codec, etc.) */ InputSpecification?: InputSpecification; /** * The log level to write to CloudWatch Logs. */ LogLevel?: LogLevel; /** * Name of channel. */ Name?: __string; /** * Unique request ID to be specified. This is needed to prevent retries from creating multiple resources. */ RequestId?: __string; /** * Deprecated field that's only usable by whitelisted customers. */ Reserved?: __string; /** * An optional Amazon Resource Name (ARN) of the role to assume when running the Channel. */ RoleArn?: __string; /** * A collection of key-value pairs. */ Tags?: Tags; } export interface CreateChannelResponse { Channel?: Channel; } export interface CreateInputRequest { /** * Destination settings for PUSH type inputs. */ Destinations?: __listOfInputDestinationRequest; /** * A list of security groups referenced by IDs to attach to the input. */ InputSecurityGroups?: __listOf__string; /** * A list of the MediaConnect Flows that you want to use in this input. You can specify as few as one Flow and presently, as many as two. The only requirement is when you have more than one is that each Flow is in a separate Availability Zone as this ensures your EML input is redundant to AZ issues. */ MediaConnectFlows?: __listOfMediaConnectFlowRequest; /** * Name of the input. */ Name?: __string; /** * Unique identifier of the request to ensure the request is handled exactly once in case of retries. 
*/ RequestId?: __string; /** * The Amazon Resource Name (ARN) of the role this input assumes during and after creation. */ RoleArn?: __string; /** * The source URLs for a PULL-type input. Every PULL type input needs exactly two source URLs for redundancy. Only specify sources for PULL type Inputs. Leave Destinations empty. */ Sources?: __listOfInputSourceRequest; /** * A collection of key-value pairs. */ Tags?: Tags; Type?: InputType; Vpc?: InputVpcRequest; } export interface CreateInputResponse { Input?: Input; } export interface CreateInputSecurityGroupRequest { /** * A collection of key-value pairs. */ Tags?: Tags; /** * List of IPv4 CIDR addresses to whitelist */ WhitelistRules?: __listOfInputWhitelistRuleCidr; } export interface CreateInputSecurityGroupResponse { SecurityGroup?: InputSecurityGroup; } export interface CreateTagsRequest { ResourceArn: __string; Tags?: Tags; } export interface DeleteChannelRequest { /** * Unique ID of the channel. */ ChannelId: __string; } export interface DeleteChannelResponse { /** * The unique arn of the channel. */ Arn?: __string; /** * The class for this channel. STANDARD for a channel with two pipelines or SINGLE_PIPELINE for a channel with one pipeline. */ ChannelClass?: ChannelClass; /** * A list of destinations of the channel. For UDP outputs, there is one destination per output. For other types (HLS, for example), there is one destination per packager. */ Destinations?: __listOfOutputDestination; /** * The endpoints where outgoing connections initiate from */ EgressEndpoints?: __listOfChannelEgressEndpoint; EncoderSettings?: EncoderSettings; /** * The unique id of the channel. */ Id?: __string; /** * List of input attachments for channel. */ InputAttachments?: __listOfInputAttachment; InputSpecification?: InputSpecification; /**