@pulumi/aws-native
The Pulumi AWS Cloud Control Provider enables you to build, deploy, and manage [any AWS resource that's supported by the AWS Cloud Control API](https://github.com/pulumi/pulumi-aws-native/blob/master/provider/cmd/pulumi-gen-aws-native/supported-types.txt).
TypeScript
import * as pulumi from "@pulumi/pulumi";
import * as inputs from "../types/input";
import * as outputs from "../types/output";
import * as enums from "../types/enums";
/**
* Resource Type definition for AWS::SageMaker::InferenceExperiment
*/
export declare class InferenceExperiment extends pulumi.CustomResource {
/**
* Get an existing InferenceExperiment resource's state with the given name, ID, and optional extra
* properties used to qualify the lookup.
*
* @param name The _unique_ name of the resulting resource.
* @param id The _unique_ provider ID of the resource to lookup.
* @param opts Optional settings to control the behavior of the CustomResource.
*
* A usage sketch for `get` and `isInstance` follows this class declaration.
*/
static get(name: string, id: pulumi.Input<pulumi.ID>, opts?: pulumi.CustomResourceOptions): InferenceExperiment;
/**
* Returns true if the given object is an instance of InferenceExperiment. This is designed to work even
* when multiple copies of the Pulumi SDK have been loaded into the same process.
*/
static isInstance(obj: any): obj is InferenceExperiment;
/**
* The Amazon Resource Name (ARN) of the inference experiment.
*/
readonly arn: pulumi.Output<string>;
/**
* The timestamp at which you created the inference experiment.
*/
readonly creationTime: pulumi.Output<string>;
/**
* The Amazon S3 location and configuration for storing inference request and response data.
*/
readonly dataStorageConfig: pulumi.Output<outputs.sagemaker.InferenceExperimentDataStorageConfig | undefined>;
/**
* The description of the inference experiment.
*/
readonly description: pulumi.Output<string | undefined>;
/**
* The desired state of the experiment after a start or stop operation.
*/
readonly desiredState: pulumi.Output<enums.sagemaker.InferenceExperimentDesiredState | undefined>;
/**
* The metadata of the endpoint on which the inference experiment ran.
*/
readonly endpointMetadata: pulumi.Output<outputs.sagemaker.InferenceExperimentEndpointMetadata>;
/**
* The name of the endpoint.
*/
readonly endpointName: pulumi.Output<string>;
/**
* The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance that hosts the endpoint.
*/
readonly kmsKey: pulumi.Output<string | undefined>;
/**
* The timestamp at which you last modified the inference experiment.
*/
readonly lastModifiedTime: pulumi.Output<string>;
/**
* An array of ModelVariantConfig objects. Each ModelVariantConfig object in the array describes the infrastructure configuration for the corresponding variant.
*/
readonly modelVariants: pulumi.Output<outputs.sagemaker.InferenceExperimentModelVariantConfig[]>;
/**
* The name for the inference experiment.
*/
readonly name: pulumi.Output<string>;
/**
* The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to access model artifacts and container images, and manage Amazon SageMaker Inference endpoints for model deployment.
*/
readonly roleArn: pulumi.Output<string>;
/**
* The duration for which the inference experiment ran or will run.
*
* The maximum duration that you can set for an inference experiment is 30 days.
*/
readonly schedule: pulumi.Output<outputs.sagemaker.InferenceExperimentSchedule | undefined>;
/**
* The configuration of the `ShadowMode` inference experiment type, which specifies the production variant that takes all the inference requests and the shadow variant to which Amazon SageMaker replicates a percentage of those requests, as well as the percentage of requests to replicate.
*/
readonly shadowModeConfig: pulumi.Output<outputs.sagemaker.InferenceExperimentShadowModeConfig | undefined>;
/**
* The status of the inference experiment.
*/
readonly status: pulumi.Output<enums.sagemaker.InferenceExperimentStatus>;
/**
* The error message or client-specified reason from the StopInferenceExperiment API that explains the status of the inference experiment.
*/
readonly statusReason: pulumi.Output<string | undefined>;
/**
* An array of key-value pairs to apply to this resource.
*/
readonly tags: pulumi.Output<outputs.Tag[] | undefined>;
/**
* The type of the inference experiment that you want to run.
*/
readonly type: pulumi.Output<enums.sagemaker.InferenceExperimentType>;
/**
* Create an InferenceExperiment resource with the given unique name, arguments, and options.
*
* @param name The _unique_ name of the resource.
* @param args The arguments to use to populate this resource's properties.
* @param opts A bag of options that control this resource's behavior.
*/
constructor(name: string, args: InferenceExperimentArgs, opts?: pulumi.CustomResourceOptions);
}
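/*
 * Usage sketch (not part of the generated declarations), assuming
 * `import * as aws_native from "@pulumi/aws-native"`. `get` reads the state of
 * an inference experiment that already exists, identified by its provider ID,
 * without creating or modifying it; the name and ID below are placeholders.
 *
 *     const existing = aws_native.sagemaker.InferenceExperiment.get(
 *         "imported-experiment", "my-existing-experiment");
 *
 * `isInstance` then confirms that an arbitrary object really is an
 * InferenceExperiment, even when several copies of the Pulumi SDK are loaded
 * in the same process:
 *
 *     if (aws_native.sagemaker.InferenceExperiment.isInstance(existing)) {
 *         // safe to read outputs such as existing.status
 *     }
 */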
/**
* The set of arguments for constructing an InferenceExperiment resource.
*/
export interface InferenceExperimentArgs {
/**
* The Amazon S3 location and configuration for storing inference request and response data.
*/
dataStorageConfig?: pulumi.Input<inputs.sagemaker.InferenceExperimentDataStorageConfigArgs>;
/**
* The description of the inference experiment.
*/
description?: pulumi.Input<string>;
/**
* The desired state of the experiment after a start or stop operation.
*/
desiredState?: pulumi.Input<enums.sagemaker.InferenceExperimentDesiredState>;
/**
* The name of the endpoint.
*/
endpointName: pulumi.Input<string>;
/**
* The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance that hosts the endpoint.
*/
kmsKey?: pulumi.Input<string>;
/**
* An array of ModelVariantConfig objects. Each ModelVariantConfig object in the array describes the infrastructure configuration for the corresponding variant.
*/
modelVariants: pulumi.Input<pulumi.Input<inputs.sagemaker.InferenceExperimentModelVariantConfigArgs>[]>;
/**
* The name for the inference experiment.
*/
name?: pulumi.Input<string>;
/**
* The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to access model artifacts and container images, and manage Amazon SageMaker Inference endpoints for model deployment.
*/
roleArn: pulumi.Input<string>;
/**
* The duration for which the inference experiment ran or will run.
*
* The maximum duration that you can set for an inference experiment is 30 days.
*/
schedule?: pulumi.Input<inputs.sagemaker.InferenceExperimentScheduleArgs>;
/**
* The configuration of the `ShadowMode` inference experiment type, which specifies the production variant that takes all the inference requests and the shadow variant to which Amazon SageMaker replicates a percentage of those requests, as well as the percentage of requests to replicate.
*/
shadowModeConfig?: pulumi.Input<inputs.sagemaker.InferenceExperimentShadowModeConfigArgs>;
/**
* The error message or client-specified reason from the StopInferenceExperiment API that explains the status of the inference experiment.
*/
statusReason?: pulumi.Input<string>;
/**
* An array of key-value pairs to apply to this resource.
*/
tags?: pulumi.Input<pulumi.Input<inputs.TagArgs>[]>;
/**
* The type of the inference experiment that you want to run.
*/
type: pulumi.Input<enums.sagemaker.InferenceExperimentType>;
}
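A minimal usage sketch follows, assuming the package is imported as `aws_native`. The nested `modelVariants` and `shadowModeConfig` property names and the `"ShadowMode"` / `"RealTimeInference"` values mirror the AWS CloudFormation `AWS::SageMaker::InferenceExperiment` schema rather than anything shown in this declaration file, and the endpoint, model, and role names are placeholders:

import * as aws_native from "@pulumi/aws-native";

// Mirror 50% of production traffic from an existing endpoint to a shadow
// (candidate) model variant for side-by-side evaluation.
const experiment = new aws_native.sagemaker.InferenceExperiment("canary-experiment", {
    type: "ShadowMode", // enums.sagemaker.InferenceExperimentType
    endpointName: "my-existing-endpoint",
    roleArn: "arn:aws:iam::123456789012:role/sagemaker-inference-experiment-role",
    modelVariants: [
        {
            modelName: "prod-model",
            variantName: "production",
            infrastructureConfig: {
                infrastructureType: "RealTimeInference",
                realTimeInferenceConfig: {
                    instanceType: "ml.m5.xlarge",
                    instanceCount: 1,
                },
            },
        },
        {
            modelName: "candidate-model",
            variantName: "shadow",
            infrastructureConfig: {
                infrastructureType: "RealTimeInference",
                realTimeInferenceConfig: {
                    instanceType: "ml.m5.xlarge",
                    instanceCount: 1,
                },
            },
        },
    ],
    shadowModeConfig: {
        sourceModelVariantName: "production",
        shadowModelVariants: [
            {
                shadowModelVariantName: "shadow",
                samplingPercentage: 50,
            },
        ],
    },
});

// `arn` and `status` are outputs resolved once the resource has been created.
export const experimentArn = experiment.arn;
export const experimentStatus = experiment.status;

The two entries in `modelVariants` describe the infrastructure for the production and shadow variants respectively; `shadowModeConfig` then names which variant is the source and what fraction of its requests SageMaker replicates to the shadow variant.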