// @pulumi/aws — A Pulumi package for creating and managing Amazon Web Services (AWS) cloud resources.
"use strict";
// *** WARNING: this file was generated by pulumi-language-nodejs. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
Object.defineProperty(exports, "__esModule", { value: true });
exports.StreamProcessor = void 0;
const pulumi = require("@pulumi/pulumi");
const utilities = require("../utilities");
/**
* Resource for managing an AWS Rekognition Stream Processor.
*
* > This resource must be configured specifically for your use case, and not all options are compatible with one another. See [Stream Processor API documentation](https://docs.aws.amazon.com/rekognition/latest/APIReference/API_CreateStreamProcessor.html#rekognition-CreateStreamProcessor-request-Input) for configuration information.
*
* > Stream Processors configured for Face Recognition cannot have _any_ properties updated after the fact, and it will result in an AWS API error.
*
* ## Example Usage
*
* ### Label Detection
*
* ```typescript
* import * as pulumi from "@pulumi/pulumi";
* import * as aws from "@pulumi/aws";
*
* const example = new aws.s3.Bucket("example", {bucket: "example-bucket"});
* const exampleTopic = new aws.sns.Topic("example", {name: "example-topic"});
* const exampleVideoStream = new aws.kinesis.VideoStream("example", {
* name: "example-kinesis-input",
* dataRetentionInHours: 1,
* deviceName: "kinesis-video-device-name",
* mediaType: "video/h264",
* });
* const exampleRole = new aws.iam.Role("example", {
* name: "example-role",
* inlinePolicies: [{
* name: "Rekognition-Access",
* policy: pulumi.jsonStringify({
* Version: "2012-10-17",
* Statement: [
* {
* Action: ["s3:PutObject"],
* Effect: "Allow",
* Resource: [pulumi.interpolate`${example.arn}/*`],
* },
* {
* Action: ["sns:Publish"],
* Effect: "Allow",
* Resource: [exampleTopic.arn],
* },
* {
* Action: [
* "kinesis:Get*",
* "kinesis:DescribeStreamSummary",
* ],
* Effect: "Allow",
* Resource: [exampleVideoStream.arn],
* },
* ],
* }),
* }],
* assumeRolePolicy: JSON.stringify({
* Version: "2012-10-17",
* Statement: [{
* Action: "sts:AssumeRole",
* Effect: "Allow",
* Principal: {
* Service: "rekognition.amazonaws.com",
* },
* }],
* }),
* });
* const exampleStreamProcessor = new aws.rekognition.StreamProcessor("example", {
* roleArn: exampleRole.arn,
* name: "example-processor",
* dataSharingPreference: {
* optIn: false,
* },
* output: {
* s3Destination: {
* bucket: example.bucket,
* },
* },
* settings: {
* connectedHome: {
* labels: [
* "PERSON",
* "PET",
* ],
* },
* },
* input: {
* kinesisVideoStream: {
* arn: exampleVideoStream.arn,
* },
* },
* notificationChannel: {
* snsTopicArn: exampleTopic.arn,
* },
* });
* ```
*
* ### Face Detection Usage
*
* ```typescript
* import * as pulumi from "@pulumi/pulumi";
* import * as aws from "@pulumi/aws";
*
* const example = new aws.kinesis.VideoStream("example", {
* name: "example-kinesis-input",
* dataRetentionInHours: 1,
* deviceName: "kinesis-video-device-name",
* mediaType: "video/h264",
* });
* const exampleStream = new aws.kinesis.Stream("example", {
* name: "pulumi-kinesis-example",
* shardCount: 1,
* });
* const exampleRole = new aws.iam.Role("example", {
* name: "example-role",
* inlinePolicies: [{
* name: "Rekognition-Access",
* policy: pulumi.jsonStringify({
* Version: "2012-10-17",
* Statement: [
* {
* Action: [
* "kinesis:Get*",
* "kinesis:DescribeStreamSummary",
* ],
* Effect: "Allow",
* Resource: [example.arn],
* },
* {
* Action: ["kinesis:PutRecord"],
* Effect: "Allow",
* Resource: [exampleStream.arn],
* },
* ],
* }),
* }],
* assumeRolePolicy: JSON.stringify({
* Version: "2012-10-17",
* Statement: [{
* Action: "sts:AssumeRole",
* Effect: "Allow",
* Principal: {
* Service: "rekognition.amazonaws.com",
* },
* }],
* }),
* });
* const exampleCollection = new aws.rekognition.Collection("example", {collectionId: "example-collection"});
* const exampleStreamProcessor = new aws.rekognition.StreamProcessor("example", {
* roleArn: exampleRole.arn,
* name: "example-processor",
* dataSharingPreference: {
* optIn: false,
* },
* regionsOfInterests: [{
* polygons: [
* {
* x: 0.5,
* y: 0.5,
* },
* {
* x: 0.5,
* y: 0.5,
* },
* {
* x: 0.5,
* y: 0.5,
* },
* ],
* }],
* input: {
* kinesisVideoStream: {
* arn: example.arn,
* },
* },
* output: {
* kinesisDataStream: {
* arn: exampleStream.arn,
* },
* },
* settings: {
* faceSearch: {
* collectionId: exampleCollection.id,
* },
* },
* });
* ```
*
* ## Import
*
* Using `pulumi import`, import Rekognition Stream Processor using the `name`. For example:
*
* ```sh
* $ pulumi import aws:rekognition/streamProcessor:StreamProcessor example my-stream
* ```
*/
class StreamProcessor extends pulumi.CustomResource {
    /**
     * Look up an existing StreamProcessor by name and provider ID, optionally
     * seeding the lookup with known state.
     *
     * @param name The _unique_ name of the resulting resource.
     * @param id The _unique_ provider ID of the resource to lookup.
     * @param state Any extra arguments used during the lookup.
     * @param opts Optional settings to control the behavior of the CustomResource.
     */
    static get(name, id, state, opts) {
        return new StreamProcessor(name, state, { ...opts, id });
    }
    /**
     * Returns true if the given object is an instance of StreamProcessor. This is designed to work even
     * when multiple copies of the Pulumi SDK have been loaded into the same process.
     */
    static isInstance(obj) {
        // Compare the type tag rather than using `instanceof`, which breaks
        // across duplicate copies of the SDK. `?.` yields undefined for
        // null/undefined, which never equals the (string) type token.
        return obj?.['__pulumiType'] === StreamProcessor.__pulumiType;
    }
    /**
     * Create or (when `opts.id` is set) hydrate a StreamProcessor resource.
     *
     * @param name The _unique_ name of the resource.
     * @param argsOrState Creation args, or prior state when `opts.id` is given.
     * @param opts Options controlling resource behavior.
     */
    constructor(name, argsOrState, opts) {
        opts = opts || {};
        const resourceInputs = {};
        if (opts.id) {
            // Hydrating from existing state: copy every known property through.
            const state = argsOrState;
            const stateProps = [
                "arn",
                "dataSharingPreference",
                "input",
                "kmsKeyId",
                "name",
                "notificationChannel",
                "output",
                "region",
                "regionsOfInterests",
                "roleArn",
                "settings",
                "streamProcessorArn",
                "tags",
                "tagsAll",
                "timeouts",
            ];
            for (const prop of stateProps) {
                resourceInputs[prop] = state?.[prop];
            }
        }
        else {
            const args = argsOrState;
            // `roleArn` is the only required input (unless rehydrating via URN).
            if (args?.roleArn === undefined && !opts.urn) {
                throw new Error("Missing required property 'roleArn'");
            }
            const inputProps = [
                "dataSharingPreference",
                "input",
                "kmsKeyId",
                "name",
                "notificationChannel",
                "output",
                "region",
                "regionsOfInterests",
                "roleArn",
                "settings",
                "tags",
                "timeouts",
            ];
            for (const prop of inputProps) {
                resourceInputs[prop] = args?.[prop];
            }
            // Output-only properties are never taken from args.
            for (const outProp of ["arn", "streamProcessorArn", "tagsAll"]) {
                resourceInputs[outProp] = undefined /*out*/;
            }
        }
        opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts);
        super(StreamProcessor.__pulumiType, name, resourceInputs, opts);
    }
}
exports.StreamProcessor = StreamProcessor;
/** @internal */
StreamProcessor.__pulumiType = 'aws:rekognition/streamProcessor:StreamProcessor';
//# sourceMappingURL=streamProcessor.js.map