@pulumi/aws-native
The Pulumi AWS Cloud Control Provider enables you to build, deploy, and manage [any AWS resource that's supported by the AWS Cloud Control API](https://github.com/pulumi/pulumi-aws-native/blob/master/provider/cmd/pulumi-gen-aws-native/supported-types.txt).
TypeScript
import * as pulumi from "@pulumi/pulumi";
import * as inputs from "../types/input";
import * as outputs from "../types/output";
/**
* The AWS::Rekognition::StreamProcessor type is used to create an Amazon Rekognition StreamProcessor that you can use to analyze streaming videos.
*/
export declare class StreamProcessor extends pulumi.CustomResource {
/**
* Get an existing StreamProcessor resource's state with the given name, ID, and optional extra
* properties used to qualify the lookup.
*
* @param name The _unique_ name of the resulting resource.
* @param id The _unique_ provider ID of the resource to lookup.
* @param opts Optional settings to control the behavior of the CustomResource.
*/
static get(name: string, id: pulumi.Input<pulumi.ID>, opts?: pulumi.CustomResourceOptions): StreamProcessor;
/**
* Returns true if the given object is an instance of StreamProcessor. This is designed to work even
* when multiple copies of the Pulumi SDK have been loaded into the same process.
*/
static isInstance(obj: any): obj is StreamProcessor;
/**
* Amazon Resource Name for the newly created stream processor.
*/
readonly arn: pulumi.Output<string>;
/**
 * The BoundingBoxRegionsOfInterest property specifies an array of bounding boxes of interest in the video frames to analyze, as part of the connected home feature. If an object is partially in a region of interest, Rekognition tags it as detected when the object's overlap with the region of interest is greater than 20%.
*/
readonly boundingBoxRegionsOfInterest: pulumi.Output<outputs.rekognition.StreamProcessorBoundingBox[] | undefined>;
/**
* Connected home settings to use on a streaming video. You can use a stream processor for connected home features and select what you want the stream processor to detect, such as people or pets. When the stream processor has started, one notification is sent for each object class specified. For more information, see the ConnectedHome section of [StreamProcessorSettings](https://docs.aws.amazon.com/rekognition/latest/APIReference/API_StreamProcessorSettings) .
*/
readonly connectedHomeSettings: pulumi.Output<outputs.rekognition.StreamProcessorConnectedHomeSettings | undefined>;
/**
 * Allows you to opt in to or out of sharing data with Rekognition to improve model performance. You can choose this option at the account level or on a per-stream basis. Note that if you opt out at the account level, this setting is ignored on individual streams. For more information, see [StreamProcessorDataSharingPreference](https://docs.aws.amazon.com/rekognition/latest/APIReference/API_StreamProcessorDataSharingPreference) .
*/
readonly dataSharingPreference: pulumi.Output<outputs.rekognition.StreamProcessorDataSharingPreference | undefined>;
/**
* The input parameters used to recognize faces in a streaming video analyzed by an Amazon Rekognition stream processor. For more information regarding the contents of the parameters, see [FaceSearchSettings](https://docs.aws.amazon.com/rekognition/latest/APIReference/API_FaceSearchSettings) .
*/
readonly faceSearchSettings: pulumi.Output<outputs.rekognition.StreamProcessorFaceSearchSettings | undefined>;
/**
* Amazon Rekognition's Video Stream Processor takes a Kinesis video stream as input. This is the Amazon Kinesis Data Streams instance to which the Amazon Rekognition stream processor streams the analysis results. This must be created within the constraints specified at [KinesisDataStream](https://docs.aws.amazon.com/rekognition/latest/APIReference/API_KinesisDataStream) .
*/
readonly kinesisDataStream: pulumi.Output<outputs.rekognition.StreamProcessorKinesisDataStream | undefined>;
/**
* The Kinesis video stream that provides the source of the streaming video for an Amazon Rekognition Video stream processor. For more information, see [KinesisVideoStream](https://docs.aws.amazon.com/rekognition/latest/APIReference/API_KinesisVideoStream) .
*/
readonly kinesisVideoStream: pulumi.Output<outputs.rekognition.StreamProcessorKinesisVideoStream>;
/**
 * The KMS key that is used by Rekognition to encrypt any intermediate customer metadata and store it in the customer's S3 bucket.
*/
readonly kmsKeyId: pulumi.Output<string | undefined>;
/**
 * Name of the stream processor. It's an identifier that you assign to the stream processor and can use to manage it.
*/
readonly name: pulumi.Output<string | undefined>;
/**
* The Amazon Simple Notification Service topic to which Amazon Rekognition publishes the object detection results and completion status of a video analysis operation. Amazon Rekognition publishes a notification the first time an object of interest or a person is detected in the video stream. Amazon Rekognition also publishes an end-of-session notification with a summary when the stream processing session is complete. For more information, see [StreamProcessorNotificationChannel](https://docs.aws.amazon.com/rekognition/latest/APIReference/API_StreamProcessorNotificationChannel) .
*/
readonly notificationChannel: pulumi.Output<outputs.rekognition.StreamProcessorNotificationChannel | undefined>;
/**
 * The PolygonRegionsOfInterest property specifies a set of polygon areas of interest in the video frames to analyze, as part of the connected home feature. Each polygon is, in turn, an ordered list of Point objects.
*/
readonly polygonRegionsOfInterest: pulumi.Output<outputs.rekognition.StreamProcessorPoint[][] | undefined>;
/**
 * ARN of the IAM role that allows access to the stream processor and grants Rekognition read permissions for the Kinesis video stream as well as write permissions to the S3 bucket and SNS topic.
*/
readonly roleArn: pulumi.Output<string>;
/**
* The Amazon S3 bucket location to which Amazon Rekognition publishes the detailed inference results of a video analysis operation. For more information, see the S3Destination section of [StreamProcessorOutput](https://docs.aws.amazon.com/rekognition/latest/APIReference/API_StreamProcessorOutput) .
*/
readonly s3Destination: pulumi.Output<outputs.rekognition.StreamProcessorS3Destination | undefined>;
/**
* Current status of the stream processor.
*/
readonly status: pulumi.Output<string>;
/**
* Detailed status message about the stream processor.
*/
readonly statusMessage: pulumi.Output<string>;
/**
* An array of key-value pairs to apply to this resource.
*/
readonly tags: pulumi.Output<outputs.Tag[] | undefined>;
/**
* Create a StreamProcessor resource with the given unique name, arguments, and options.
*
* @param name The _unique_ name of the resource.
* @param args The arguments to use to populate this resource's properties.
* @param opts A bag of options that control this resource's behavior.
*/
constructor(name: string, args: StreamProcessorArgs, opts?: pulumi.CustomResourceOptions);
}
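// Usage sketch (illustrative comment only, not part of the generated declaration): looking up
// an existing stream processor and using the type guard. The resource name and provider ID
// below are hypothetical placeholders.
//
//     const existing = StreamProcessor.get("imported-processor", "my-stream-processor-id");
//     export const importedArn = existing.arn; // pulumi.Output<string>
//
//     // The type guard works even when multiple copies of the Pulumi SDK are loaded:
//     if (StreamProcessor.isInstance(existing)) {
//         console.log("existing is a StreamProcessor");
//     }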
/**
* The set of arguments for constructing a StreamProcessor resource.
*/
export interface StreamProcessorArgs {
/**
 * The BoundingBoxRegionsOfInterest property specifies an array of bounding boxes of interest in the video frames to analyze, as part of the connected home feature. If an object is partially in a region of interest, Rekognition tags it as detected when the object's overlap with the region of interest is greater than 20%.
*/
boundingBoxRegionsOfInterest?: pulumi.Input<pulumi.Input<inputs.rekognition.StreamProcessorBoundingBoxArgs>[]>;
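// Illustrative value (the field names are assumed to mirror the CloudFormation BoundingBox
// schema, with coordinates expressed as ratios of the frame dimensions):
//     boundingBoxRegionsOfInterest: [{ left: 0.1, top: 0.1, width: 0.5, height: 0.4 }]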
/**
* Connected home settings to use on a streaming video. You can use a stream processor for connected home features and select what you want the stream processor to detect, such as people or pets. When the stream processor has started, one notification is sent for each object class specified. For more information, see the ConnectedHome section of [StreamProcessorSettings](https://docs.aws.amazon.com/rekognition/latest/APIReference/API_StreamProcessorSettings) .
*/
connectedHomeSettings?: pulumi.Input<inputs.rekognition.StreamProcessorConnectedHomeSettingsArgs>;
/**
 * Allows you to opt in to or out of sharing data with Rekognition to improve model performance. You can choose this option at the account level or on a per-stream basis. Note that if you opt out at the account level, this setting is ignored on individual streams. For more information, see [StreamProcessorDataSharingPreference](https://docs.aws.amazon.com/rekognition/latest/APIReference/API_StreamProcessorDataSharingPreference) .
*/
dataSharingPreference?: pulumi.Input<inputs.rekognition.StreamProcessorDataSharingPreferenceArgs>;
/**
* The input parameters used to recognize faces in a streaming video analyzed by an Amazon Rekognition stream processor. For more information regarding the contents of the parameters, see [FaceSearchSettings](https://docs.aws.amazon.com/rekognition/latest/APIReference/API_FaceSearchSettings) .
*/
faceSearchSettings?: pulumi.Input<inputs.rekognition.StreamProcessorFaceSearchSettingsArgs>;
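// Illustrative value for a face search processor (field names assumed from the Rekognition
// FaceSearchSettings schema; the collection ID is a hypothetical placeholder):
//     faceSearchSettings: { collectionId: "my-face-collection", faceMatchThreshold: 90 }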
/**
* Amazon Rekognition's Video Stream Processor takes a Kinesis video stream as input. This is the Amazon Kinesis Data Streams instance to which the Amazon Rekognition stream processor streams the analysis results. This must be created within the constraints specified at [KinesisDataStream](https://docs.aws.amazon.com/rekognition/latest/APIReference/API_KinesisDataStream) .
*/
kinesisDataStream?: pulumi.Input<inputs.rekognition.StreamProcessorKinesisDataStreamArgs>;
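// Illustrative value for a face search processor, which streams results to a Kinesis data
// stream (the arn field name is assumed from the Rekognition KinesisDataStream schema; the
// ARN itself is a hypothetical placeholder):
//     kinesisDataStream: { arn: "arn:aws:kinesis:us-east-1:111111111111:stream/rekognition-results" }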
/**
* The Kinesis video stream that provides the source of the streaming video for an Amazon Rekognition Video stream processor. For more information, see [KinesisVideoStream](https://docs.aws.amazon.com/rekognition/latest/APIReference/API_KinesisVideoStream) .
*/
kinesisVideoStream: pulumi.Input<inputs.rekognition.StreamProcessorKinesisVideoStreamArgs>;
/**
 * The KMS key that is used by Rekognition to encrypt any intermediate customer metadata and store it in the customer's S3 bucket.
*/
kmsKeyId?: pulumi.Input<string>;
/**
 * Name of the stream processor. It's an identifier that you assign to the stream processor and can use to manage it.
*/
name?: pulumi.Input<string>;
/**
* The Amazon Simple Notification Service topic to which Amazon Rekognition publishes the object detection results and completion status of a video analysis operation. Amazon Rekognition publishes a notification the first time an object of interest or a person is detected in the video stream. Amazon Rekognition also publishes an end-of-session notification with a summary when the stream processing session is complete. For more information, see [StreamProcessorNotificationChannel](https://docs.aws.amazon.com/rekognition/latest/APIReference/API_StreamProcessorNotificationChannel) .
*/
notificationChannel?: pulumi.Input<inputs.rekognition.StreamProcessorNotificationChannelArgs>;
/**
 * The PolygonRegionsOfInterest property specifies a set of polygon areas of interest in the video frames to analyze, as part of the connected home feature. Each polygon is, in turn, an ordered list of Point objects.
*/
polygonRegionsOfInterest?: pulumi.Input<pulumi.Input<pulumi.Input<inputs.rekognition.StreamProcessorPointArgs>[]>[]>;
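// Illustrative value: each inner array is one polygon, given as an ordered list of points
// (the x/y field names are assumed from the Rekognition Point schema, as frame ratios):
//     polygonRegionsOfInterest: [[{ x: 0.2, y: 0.2 }, { x: 0.8, y: 0.2 }, { x: 0.5, y: 0.9 }]]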
/**
 * ARN of the IAM role that allows access to the stream processor and grants Rekognition read permissions for the Kinesis video stream as well as write permissions to the S3 bucket and SNS topic.
*/
roleArn: pulumi.Input<string>;
/**
* The Amazon S3 bucket location to which Amazon Rekognition publishes the detailed inference results of a video analysis operation. For more information, see the S3Destination section of [StreamProcessorOutput](https://docs.aws.amazon.com/rekognition/latest/APIReference/API_StreamProcessorOutput) .
*/
s3Destination?: pulumi.Input<inputs.rekognition.StreamProcessorS3DestinationArgs>;
/**
* An array of key-value pairs to apply to this resource.
*/
tags?: pulumi.Input<pulumi.Input<inputs.TagArgs>[]>;
}
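
For orientation, the following is a minimal sketch of constructing a StreamProcessor for a connected home scenario. The ARNs, bucket name, and tag values are hypothetical placeholders, and the field names inside the nested settings objects are assumed to mirror the corresponding CloudFormation AWS::Rekognition::StreamProcessor property types (camelCased by this provider); consult the generated input types in this package for the exact shapes.

import * as awsnative from "@pulumi/aws-native";

// A stream processor that watches a Kinesis video stream for people and packages,
// writes detailed inference results to S3, and notifies an SNS topic the first time
// an object of interest is detected and when the session completes.
const processor = new awsnative.rekognition.StreamProcessor("home-monitor", {
    roleArn: "arn:aws:iam::111111111111:role/rekognition-stream-processor", // hypothetical role
    kinesisVideoStream: {
        arn: "arn:aws:kinesisvideo:us-east-1:111111111111:stream/front-door/1234567890123", // hypothetical KVS
    },
    connectedHomeSettings: {
        labels: ["PERSON", "PACKAGE"], // assumed label names
        minConfidence: 80,
    },
    s3Destination: {
        bucketName: "my-rekognition-results", // hypothetical bucket
        objectKeyPrefix: "front-door/",
    },
    notificationChannel: {
        arn: "arn:aws:sns:us-east-1:111111111111:rekognition-alerts", // hypothetical topic
    },
    dataSharingPreference: { optIn: false },
    tags: [{ key: "project", value: "home-monitoring" }],
});

// arn and status resolve once the stream processor has been created.
export const processorArn = processor.arn;
export const processorStatus = processor.status;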