// UNPKG
//
// @pulumi/databricks
//
// Version:
//
// A Pulumi package for creating and managing databricks cloud resources.
//
// 1,140 lines 555 kB
import * as pulumi from "@pulumi/pulumi"; import * as inputs from "../types/input"; export interface AccessControlRuleSetGrantRule { /** * a list of principals who are granted a role. The following format is supported: * * `users/{username}` (also exposed as `aclPrincipalId` attribute of `databricks.User` resource). * * `groups/{groupname}` (also exposed as `aclPrincipalId` attribute of `databricks.Group` resource). * * `servicePrincipals/{applicationId}` (also exposed as `aclPrincipalId` attribute of `databricks.ServicePrincipal` resource). */ principals?: pulumi.Input<pulumi.Input<string>[]>; /** * Role to be granted. The supported roles are listed below. For more information about these roles, refer to [service principal roles](https://docs.databricks.com/security/auth-authz/access-control/service-principal-acl.html#service-principal-roles), [group roles](https://docs.databricks.com/en/administration-guide/users-groups/groups.html#manage-roles-on-an-account-group-using-the-workspace-admin-settings-page), [marketplace roles](https://docs.databricks.com/en/marketplace/get-started-provider.html#assign-the-marketplace-admin-role) or [budget policy permissions](https://docs.databricks.com/aws/en/admin/usage/budget-policies#manage-budget-policy-permissions), depending on the `name` defined: * * `accounts/{account_id}/ruleSets/default` * * `roles/marketplace.admin` - Databricks Marketplace administrator. * * `roles/billing.admin` - Billing administrator. * * `accounts/{account_id}/servicePrincipals/{service_principal_application_id}/ruleSets/default` * * `roles/servicePrincipal.manager` - Manager of a service principal. * * `roles/servicePrincipal.user` - User of a service principal. * * `accounts/{account_id}/groups/{group_id}/ruleSets/default` * * `roles/group.manager` - Manager of a group. * * `accounts/{account_id}/budgetPolicies/{budget_policy_id}/ruleSets/default` * * `roles/budgetPolicy.manager` - Manager of a budget policy. 
* * `roles/budgetPolicy.user` - User of a budget policy. */ role: pulumi.Input<string>; } export interface AccountNetworkPolicyEgress { /** * The access policy enforced for egress traffic to the internet */ networkAccess?: pulumi.Input<inputs.AccountNetworkPolicyEgressNetworkAccess>; } export interface AccountNetworkPolicyEgressNetworkAccess { /** * List of internet destinations that serverless workloads are allowed to access when in RESTRICTED_ACCESS mode */ allowedInternetDestinations?: pulumi.Input<pulumi.Input<inputs.AccountNetworkPolicyEgressNetworkAccessAllowedInternetDestination>[]>; /** * List of storage destinations that serverless workloads are allowed to access when in RESTRICTED_ACCESS mode */ allowedStorageDestinations?: pulumi.Input<pulumi.Input<inputs.AccountNetworkPolicyEgressNetworkAccessAllowedStorageDestination>[]>; /** * Optional. When policyEnforcement is not provided, we default to ENFORCE_MODE_ALL_SERVICES */ policyEnforcement?: pulumi.Input<inputs.AccountNetworkPolicyEgressNetworkAccessPolicyEnforcement>; /** * The restriction mode that controls how serverless workloads can access the internet. Possible values are: `FULL_ACCESS`, `RESTRICTED_ACCESS` */ restrictionMode: pulumi.Input<string>; } export interface AccountNetworkPolicyEgressNetworkAccessAllowedInternetDestination { /** * The internet destination to which access will be allowed. Format dependent on the destination type */ destination?: pulumi.Input<string>; /** * The type of internet destination. Currently only DNS_NAME is supported. Possible values are: `DNS_NAME` */ internetDestinationType?: pulumi.Input<string>; } export interface AccountNetworkPolicyEgressNetworkAccessAllowedStorageDestination { /** * The Azure storage account name */ azureStorageAccount?: pulumi.Input<string>; /** * The Azure storage service type (blob, dfs, etc.) 
*/ azureStorageService?: pulumi.Input<string>; bucketName?: pulumi.Input<string>; region?: pulumi.Input<string>; /** * The type of storage destination. Possible values are: `AWS_S3`, `AZURE_STORAGE`, `GOOGLE_CLOUD_STORAGE` */ storageDestinationType?: pulumi.Input<string>; } export interface AccountNetworkPolicyEgressNetworkAccessPolicyEnforcement { /** * When empty, it means dry run for all products. * When non-empty, it means dry run for specific products and for the other products, they will run in enforced mode */ dryRunModeProductFilters?: pulumi.Input<pulumi.Input<string>[]>; /** * The mode of policy enforcement. ENFORCED blocks traffic that violates policy, * while DRY_RUN only logs violations without blocking. When not specified, * defaults to ENFORCED. Possible values are: `DRY_RUN`, `ENFORCED` */ enforcementMode?: pulumi.Input<string>; } export interface AibiDashboardEmbeddingAccessPolicySettingAibiDashboardEmbeddingAccessPolicy { /** * Configured embedding policy. Possible values are `ALLOW_ALL_DOMAINS`, `ALLOW_APPROVED_DOMAINS`, `DENY_ALL_DOMAINS`. */ accessPolicyType: pulumi.Input<string>; } export interface AibiDashboardEmbeddingApprovedDomainsSettingAibiDashboardEmbeddingApprovedDomains { /** * the list of approved domains. To allow all subdomains for a given domain, use a wildcard symbol (`*`) before the domain name, i.e., `*.databricks.com` will allow to embed into any site under the `databricks.com`. */ approvedDomains: pulumi.Input<pulumi.Input<string>[]>; } export interface AlertCondition { /** * Alert state if the result is empty (`UNKNOWN`, `OK`, `TRIGGERED`) */ emptyResultState?: pulumi.Input<string>; /** * Operator used for comparison in alert evaluation. 
(Enum: `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `LESS_THAN`, `LESS_THAN_OR_EQUAL`, `EQUAL`, `NOT_EQUAL`, `IS_NULL`) */ op: pulumi.Input<string>; /** * Name of the column from the query result to use for comparison in alert evaluation: */ operand: pulumi.Input<inputs.AlertConditionOperand>; /** * Threshold value used for comparison in alert evaluation: */ threshold?: pulumi.Input<inputs.AlertConditionThreshold>; } export interface AlertConditionOperand { /** * Block describing the column from the query result to use for comparison in alert evaluation: */ column: pulumi.Input<inputs.AlertConditionOperandColumn>; } export interface AlertConditionOperandColumn { /** * Name of the column. */ name: pulumi.Input<string>; } export interface AlertConditionThreshold { /** * actual value used in comparison (one of the attributes is required): */ value: pulumi.Input<inputs.AlertConditionThresholdValue>; } export interface AlertConditionThresholdValue { /** * boolean value (`true` or `false`) to compare against boolean results. */ boolValue?: pulumi.Input<boolean>; /** * double value to compare against integer and double results. */ doubleValue?: pulumi.Input<number>; /** * string value to compare against string results. */ stringValue?: pulumi.Input<string>; } export interface AlertV2Evaluation { /** * Operator used for comparison in alert evaluation. Possible values are: `EQUAL`, `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `IS_NOT_NULL`, `IS_NULL`, `LESS_THAN`, `LESS_THAN_OR_EQUAL`, `NOT_EQUAL` */ comparisonOperator?: pulumi.Input<string>; /** * Alert state if result is empty. 
Possible values are: `ERROR`, `OK`, `TRIGGERED`, `UNKNOWN` */ emptyResultState?: pulumi.Input<string>; /** * (string) - Timestamp of the last evaluation */ lastEvaluatedAt?: pulumi.Input<string>; /** * User or Notification Destination to notify when alert is triggered */ notification?: pulumi.Input<inputs.AlertV2EvaluationNotification>; /** * Source column from result to use to evaluate alert */ source?: pulumi.Input<inputs.AlertV2EvaluationSource>; /** * (string) - Latest state of alert evaluation. Possible values are: `ERROR`, `OK`, `TRIGGERED`, `UNKNOWN` */ state?: pulumi.Input<string>; /** * Threshold to user for alert evaluation, can be a column or a value */ threshold?: pulumi.Input<inputs.AlertV2EvaluationThreshold>; } export interface AlertV2EvaluationNotification { /** * Whether to notify alert subscribers when alert returns back to normal */ notifyOnOk?: pulumi.Input<boolean>; /** * Number of seconds an alert must wait after being triggered to rearm itself. After rearming, it can be triggered again. If 0 or not specified, the alert will not be triggered again */ retriggerSeconds?: pulumi.Input<number>; subscriptions?: pulumi.Input<pulumi.Input<inputs.AlertV2EvaluationNotificationSubscription>[]>; } export interface AlertV2EvaluationNotificationSubscription { destinationId?: pulumi.Input<string>; userEmail?: pulumi.Input<string>; } export interface AlertV2EvaluationSource { /** * . Possible values are: `AVG`, `COUNT`, `COUNT_DISTINCT`, `MAX`, `MEDIAN`, `MIN`, `STDDEV`, `SUM` */ aggregation?: pulumi.Input<string>; display?: pulumi.Input<string>; name?: pulumi.Input<string>; } export interface AlertV2EvaluationThreshold { column?: pulumi.Input<inputs.AlertV2EvaluationThresholdColumn>; value?: pulumi.Input<inputs.AlertV2EvaluationThresholdValue>; } export interface AlertV2EvaluationThresholdColumn { /** * . 
Possible values are: `AVG`, `COUNT`, `COUNT_DISTINCT`, `MAX`, `MEDIAN`, `MIN`, `STDDEV`, `SUM` */ aggregation?: pulumi.Input<string>; display?: pulumi.Input<string>; name?: pulumi.Input<string>; } export interface AlertV2EvaluationThresholdValue { boolValue?: pulumi.Input<boolean>; doubleValue?: pulumi.Input<number>; stringValue?: pulumi.Input<string>; } export interface AlertV2Schedule { /** * Indicate whether this schedule is paused or not. Possible values are: `PAUSED`, `UNPAUSED` */ pauseStatus?: pulumi.Input<string>; /** * A cron expression using quartz syntax that specifies the schedule for this pipeline. * Should use the quartz format described here: http://www.quartz-scheduler.org/documentation/quartz-2.1.7/tutorials/tutorial-lesson-06.html */ quartzCronSchedule?: pulumi.Input<string>; /** * A Java timezone id. The schedule will be resolved using this timezone. * This will be combined with the quartzCronSchedule to determine the schedule. * See https://docs.databricks.com/sql/language-manual/sql-ref-syntax-aux-conf-mgmt-set-timezone.html for details */ timezoneId?: pulumi.Input<string>; } export interface AppActiveDeployment { /** * The creation time of the app. */ createTime?: pulumi.Input<string>; /** * The email of the user that created the app. */ creator?: pulumi.Input<string>; deploymentArtifacts?: pulumi.Input<inputs.AppActiveDeploymentDeploymentArtifacts>; deploymentId?: pulumi.Input<string>; mode?: pulumi.Input<string>; sourceCodePath?: pulumi.Input<string>; status?: pulumi.Input<inputs.AppActiveDeploymentStatus>; /** * The update time of the app. */ updateTime?: pulumi.Input<string>; } export interface AppActiveDeploymentDeploymentArtifacts { sourceCodePath?: pulumi.Input<string>; } export interface AppActiveDeploymentStatus { /** * Application status message */ message?: pulumi.Input<string>; /** * State of the application. 
*/ state?: pulumi.Input<string>; } export interface AppAppStatus { /** * Application status message */ message?: pulumi.Input<string>; /** * State of the application. */ state?: pulumi.Input<string>; } export interface AppComputeStatus { /** * Application status message */ message?: pulumi.Input<string>; /** * State of the application. */ state?: pulumi.Input<string>; } export interface AppPendingDeployment { /** * The creation time of the app. */ createTime?: pulumi.Input<string>; /** * The email of the user that created the app. */ creator?: pulumi.Input<string>; deploymentArtifacts?: pulumi.Input<inputs.AppPendingDeploymentDeploymentArtifacts>; deploymentId?: pulumi.Input<string>; mode?: pulumi.Input<string>; sourceCodePath?: pulumi.Input<string>; status?: pulumi.Input<inputs.AppPendingDeploymentStatus>; /** * The update time of the app. */ updateTime?: pulumi.Input<string>; } export interface AppPendingDeploymentDeploymentArtifacts { sourceCodePath?: pulumi.Input<string>; } export interface AppPendingDeploymentStatus { /** * Application status message */ message?: pulumi.Input<string>; /** * State of the application. */ state?: pulumi.Input<string>; } export interface AppResource { /** * The description of the resource. * * Exactly one of the following attributes must be provided: */ description?: pulumi.Input<string>; /** * attribute */ job?: pulumi.Input<inputs.AppResourceJob>; /** * The name of the resource. */ name: pulumi.Input<string>; /** * attribute */ secret?: pulumi.Input<inputs.AppResourceSecret>; /** * attribute */ servingEndpoint?: pulumi.Input<inputs.AppResourceServingEndpoint>; /** * attribute */ sqlWarehouse?: pulumi.Input<inputs.AppResourceSqlWarehouse>; /** * attribute (see the [API docs](https://docs.databricks.com/api/workspace/apps/create#resources-uc_securable) for full list of supported UC objects) */ ucSecurable?: pulumi.Input<inputs.AppResourceUcSecurable>; } export interface AppResourceJob { /** * Id of the job to grant permission on. 
*/ id: pulumi.Input<string>; /** * Permissions to grant on the Job. Supported permissions are: `CAN_MANAGE`, `IS_OWNER`, `CAN_MANAGE_RUN`, `CAN_VIEW`. */ permission: pulumi.Input<string>; } export interface AppResourceSecret { /** * Key of the secret to grant permission on. */ key: pulumi.Input<string>; /** * Permission to grant on the secret scope. For secrets, only one permission is allowed. Permission must be one of: `READ`, `WRITE`, `MANAGE`. */ permission: pulumi.Input<string>; /** * Scope of the secret to grant permission on. */ scope: pulumi.Input<string>; } export interface AppResourceServingEndpoint { /** * Name of the serving endpoint to grant permission on. */ name: pulumi.Input<string>; /** * Permission to grant on the serving endpoint. Supported permissions are: `CAN_MANAGE`, `CAN_QUERY`, `CAN_VIEW`. */ permission: pulumi.Input<string>; } export interface AppResourceSqlWarehouse { /** * Id of the SQL warehouse to grant permission on. */ id: pulumi.Input<string>; /** * Permission to grant on the SQL warehouse. Supported permissions are: `CAN_MANAGE`, `CAN_USE`, `IS_OWNER`. */ permission: pulumi.Input<string>; } export interface AppResourceUcSecurable { /** * Permissions to grant on UC securable, i.e. `READ_VOLUME`, `WRITE_VOLUME`. */ permission: pulumi.Input<string>; /** * the full name of UC securable, i.e. `my-catalog.my-schema.my-volume`. */ securableFullName: pulumi.Input<string>; /** * the type of UC securable, i.e. `VOLUME`. */ securableType: pulumi.Input<string>; } export interface ArtifactAllowlistArtifactMatcher { /** * The artifact path or maven coordinate. */ artifact: pulumi.Input<string>; /** * The pattern matching type of the artifact. Only `PREFIX_MATCH` is supported. 
*/ matchType: pulumi.Input<string>; } export interface AutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspace { canToggle?: pulumi.Input<boolean>; enabled: pulumi.Input<boolean>; enablementDetails?: pulumi.Input<inputs.AutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceEnablementDetails>; maintenanceWindow?: pulumi.Input<inputs.AutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceMaintenanceWindow>; restartEvenIfNoUpdatesAvailable?: pulumi.Input<boolean>; } export interface AutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceEnablementDetails { forcedForComplianceMode?: pulumi.Input<boolean>; unavailableForDisabledEntitlement?: pulumi.Input<boolean>; unavailableForNonEnterpriseTier?: pulumi.Input<boolean>; } export interface AutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceMaintenanceWindow { weekDayBasedSchedule?: pulumi.Input<inputs.AutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceMaintenanceWindowWeekDayBasedSchedule>; } export interface AutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceMaintenanceWindowWeekDayBasedSchedule { dayOfWeek: pulumi.Input<string>; frequency: pulumi.Input<string>; windowStartTime?: pulumi.Input<inputs.AutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceMaintenanceWindowWeekDayBasedScheduleWindowStartTime>; } export interface AutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceMaintenanceWindowWeekDayBasedScheduleWindowStartTime { hours: pulumi.Input<number>; minutes: pulumi.Input<number>; } export interface BudgetAlertConfiguration { /** * List of action configurations to take when the budget alert is triggered. Consists of the following fields: */ actionConfigurations?: pulumi.Input<pulumi.Input<inputs.BudgetAlertConfigurationActionConfiguration>[]>; alertConfigurationId?: pulumi.Input<string>; /** * The threshold for the budget alert to determine if it is in a triggered state. 
The number is evaluated based on `quantityType`. */ quantityThreshold?: pulumi.Input<string>; /** * The way to calculate cost for this budget alert. This is what quantityThreshold is measured in. (Enum: `LIST_PRICE_DOLLARS_USD`) */ quantityType?: pulumi.Input<string>; /** * The time window of usage data for the budget. (Enum: `MONTH`) */ timePeriod?: pulumi.Input<string>; /** * The evaluation method to determine when this budget alert is in a triggered state. (Enum: `CUMULATIVE_SPENDING_EXCEEDED`) */ triggerType?: pulumi.Input<string>; } export interface BudgetAlertConfigurationActionConfiguration { actionConfigurationId?: pulumi.Input<string>; /** * The type of action to take when the budget alert is triggered. (Enum: `EMAIL_NOTIFICATION`) */ actionType?: pulumi.Input<string>; /** * The target of the action. For `EMAIL_NOTIFICATION`, this is the email address to send the notification to. */ target?: pulumi.Input<string>; } export interface BudgetFilter { /** * List of tags to filter by. Consists of the following fields: */ tags?: pulumi.Input<pulumi.Input<inputs.BudgetFilterTag>[]>; /** * Filter by workspace ID (if empty, include usage all usage for this account). Consists of the following fields: */ workspaceId?: pulumi.Input<inputs.BudgetFilterWorkspaceId>; } export interface BudgetFilterTag { /** * The key of the tag. */ key?: pulumi.Input<string>; /** * Consists of the following fields: */ value?: pulumi.Input<inputs.BudgetFilterTagValue>; } export interface BudgetFilterTagValue { /** * The operator to use for the filter. (Enum: `IN`) */ operator?: pulumi.Input<string>; /** * The values to filter by. */ values?: pulumi.Input<pulumi.Input<string>[]>; } export interface BudgetFilterWorkspaceId { /** * The operator to use for the filter. (Enum: `IN`) */ operator?: pulumi.Input<string>; /** * The values to filter by. */ values?: pulumi.Input<pulumi.Input<number>[]>; } export interface BudgetPolicyCustomTag { /** * The key of the tag. 
* - Must be unique among all custom tags of the same policy * - Cannot be “budget-policy-name”, “budget-policy-id” or "budget-policy-resolution-result" - * these tags are preserved */ key: pulumi.Input<string>; /** * The value of the tag */ value?: pulumi.Input<string>; } export interface ClusterAutoscale { /** * The maximum number of workers to which the cluster can scale up when overloaded. maxWorkers must be strictly greater than min_workers. * * When using a [Single Node cluster](https://docs.databricks.com/clusters/single-node.html), `numWorkers` needs to be `0`. It can be set to `0` explicitly, or simply not specified, as it defaults to `0`. When `numWorkers` is `0`, provider checks for presence of the required Spark configurations: * * * `spark.master` must have prefix `local`, like `local[*]` * * `spark.databricks.cluster.profile` must have value `singleNode` * * and also `customTag` entry: * * * `"ResourceClass" = "SingleNode"` * * The following example demonstrates how to create an single node cluster: * * ```typescript * import * as pulumi from "@pulumi/pulumi"; * import * as databricks from "@pulumi/databricks"; * * const smallest = databricks.getNodeType({ * localDisk: true, * }); * const latestLts = databricks.getSparkVersion({ * longTermSupport: true, * }); * const singleNode = new databricks.Cluster("single_node", { * clusterName: "Single Node", * sparkVersion: latestLts.then(latestLts => latestLts.id), * nodeTypeId: smallest.then(smallest => smallest.id), * autoterminationMinutes: 20, * sparkConf: { * "spark.databricks.cluster.profile": "singleNode", * "spark.master": "local[*]", * }, * customTags: { * ResourceClass: "SingleNode", * }, * }); * ``` */ maxWorkers?: pulumi.Input<number>; /** * The minimum number of workers to which the cluster can scale down when underutilized. It is also the initial number of workers the cluster will have after creation. 
*/ minWorkers?: pulumi.Input<number>; } export interface ClusterAwsAttributes { /** * Availability type used for all subsequent nodes past the `firstOnDemand` ones. Valid values are `SPOT`, `SPOT_WITH_FALLBACK` and `ON_DEMAND`. Note: If `firstOnDemand` is zero, this availability type will be used for the entire cluster. Backend default value is `SPOT_WITH_FALLBACK` and could change in the future */ availability?: pulumi.Input<string>; /** * The number of volumes launched for each instance. You can choose up to 10 volumes. This feature is only enabled for supported node types. Legacy node types cannot specify custom EBS volumes. For node types with no instance store, at least one EBS volume needs to be specified; otherwise, cluster creation will fail. These EBS volumes will be mounted at /ebs0, /ebs1, and etc. Instance store volumes will be mounted at /local_disk0, /local_disk1, and etc. If EBS volumes are attached, Databricks will configure Spark to use only the EBS volumes for scratch storage because heterogeneously sized scratch devices can lead to inefficient disk utilization. If no EBS volumes are attached, Databricks will configure Spark to use instance store volumes. If EBS volumes are specified, then the Spark configuration spark.local.dir will be overridden. */ ebsVolumeCount?: pulumi.Input<number>; ebsVolumeIops?: pulumi.Input<number>; /** * The size of each EBS volume (in GiB) launched for each instance. For general purpose SSD, this value must be within the range 100 - 4096. For throughput optimized HDD, this value must be within the range 500 - 4096. Custom EBS volumes cannot be specified for the legacy node types (memory-optimized and compute-optimized). */ ebsVolumeSize?: pulumi.Input<number>; ebsVolumeThroughput?: pulumi.Input<number>; /** * The type of EBS volumes that will be launched with this cluster. Valid values are `GENERAL_PURPOSE_SSD` or `THROUGHPUT_OPTIMIZED_HDD`. 
Use this option only if you're not picking *Delta Optimized `i3.*`* node types. */ ebsVolumeType?: pulumi.Input<string>; /** * The first `firstOnDemand` nodes of the cluster will be placed on on-demand instances. If this value is greater than 0, the cluster driver node will be placed on an on-demand instance. If this value is greater than or equal to the current cluster size, all nodes will be placed on on-demand instances. If this value is less than the current cluster size, `firstOnDemand` nodes will be placed on on-demand instances, and the remainder will be placed on availability instances. This value does not affect cluster size and cannot be mutated over the lifetime of a cluster. If unspecified, the default value is 0. */ firstOnDemand?: pulumi.Input<number>; /** * Nodes for this cluster will only be placed on AWS instances with this instance profile. Please see databricks.InstanceProfile resource documentation for extended examples on adding a valid instance profile using Pulumi. */ instanceProfileArn?: pulumi.Input<string>; /** * The max price for AWS spot instances, as a percentage of the corresponding instance type’s on-demand price. For example, if this field is set to 50, and the cluster needs a new `i3.xlarge` spot instance, then the max price is half of the price of on-demand `i3.xlarge` instances. Similarly, if this field is set to 200, the max price is twice the price of on-demand `i3.xlarge` instances. If not specified, the default value is `100`. When spot instances are requested for this cluster, only spot instances whose max price percentage matches this field will be considered. For safety, we enforce this field to be no more than `10000`. */ spotBidPricePercent?: pulumi.Input<number>; /** * Identifier for the availability zone/datacenter in which the cluster resides. This string will be of a form like `us-west-2a`. The provided availability zone must be in the same region as the Databricks deployment. 
For example, `us-west-2a` is not a valid zone ID if the Databricks deployment resides in the `us-east-1` region. Enable automatic availability zone selection ("Auto-AZ"), by setting the value `auto`. Databricks selects the AZ based on available IPs in the workspace subnets and retries in other availability zones if AWS returns insufficient capacity errors. */ zoneId?: pulumi.Input<string>; } export interface ClusterAzureAttributes { /** * Availability type used for all subsequent nodes past the `firstOnDemand` ones. Valid values are `SPOT_AZURE`, `SPOT_WITH_FALLBACK_AZURE`, and `ON_DEMAND_AZURE`. Note: If `firstOnDemand` is zero, this availability type will be used for the entire cluster. */ availability?: pulumi.Input<string>; /** * The first `firstOnDemand` nodes of the cluster will be placed on on-demand instances. If this value is greater than 0, the cluster driver node will be placed on an on-demand instance. If this value is greater than or equal to the current cluster size, all nodes will be placed on on-demand instances. If this value is less than the current cluster size, `firstOnDemand` nodes will be placed on on-demand instances, and the remainder will be placed on availability instances. This value does not affect cluster size and cannot be mutated over the lifetime of a cluster. */ firstOnDemand?: pulumi.Input<number>; logAnalyticsInfo?: pulumi.Input<inputs.ClusterAzureAttributesLogAnalyticsInfo>; /** * The max bid price used for Azure spot instances. You can set this to greater than or equal to the current spot price. You can also set this to `-1`, which specifies that the instance cannot be evicted on the basis of price. The price for the instance will be the current price for spot instances or the price for a standard instance. 
*/ spotBidMaxPrice?: pulumi.Input<number>; } export interface ClusterAzureAttributesLogAnalyticsInfo { logAnalyticsPrimaryKey?: pulumi.Input<string>; logAnalyticsWorkspaceId?: pulumi.Input<string>; } export interface ClusterClusterLogConf { dbfs?: pulumi.Input<inputs.ClusterClusterLogConfDbfs>; s3?: pulumi.Input<inputs.ClusterClusterLogConfS3>; volumes?: pulumi.Input<inputs.ClusterClusterLogConfVolumes>; } export interface ClusterClusterLogConfDbfs { /** * S3 destination, e.g., `s3://my-bucket/some-prefix` You must configure the cluster with an instance profile, and the instance profile must have write access to the destination. You cannot use AWS keys. */ destination: pulumi.Input<string>; } export interface ClusterClusterLogConfS3 { /** * Set canned access control list, e.g. `bucket-owner-full-control`. If `cannedCal` is set, the cluster instance profile must have `s3:PutObjectAcl` permission on the destination bucket and prefix. The full list of possible canned ACLs can be found [here](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl). By default, only the object owner gets full control. If you are using a cross-account role for writing data, you may want to set `bucket-owner-full-control` to make bucket owners able to read the logs. */ cannedAcl?: pulumi.Input<string>; /** * S3 destination, e.g., `s3://my-bucket/some-prefix` You must configure the cluster with an instance profile, and the instance profile must have write access to the destination. You cannot use AWS keys. */ destination: pulumi.Input<string>; /** * Enable server-side encryption, false by default. */ enableEncryption?: pulumi.Input<boolean>; /** * The encryption type, it could be `sse-s3` or `sse-kms`. It is used only when encryption is enabled, and the default type is `sse-s3`. */ encryptionType?: pulumi.Input<string>; /** * S3 endpoint, e.g. <https://s3-us-west-2.amazonaws.com>. Either `region` or `endpoint` needs to be set. If both are set, the endpoint is used. 
*/ endpoint?: pulumi.Input<string>; /** * KMS key used if encryption is enabled and encryption type is set to `sse-kms`. */ kmsKey?: pulumi.Input<string>; /** * S3 region, e.g. `us-west-2`. Either `region` or `endpoint` must be set. If both are set, the endpoint is used. */ region?: pulumi.Input<string>; } export interface ClusterClusterLogConfVolumes { /** * S3 destination, e.g., `s3://my-bucket/some-prefix` You must configure the cluster with an instance profile, and the instance profile must have write access to the destination. You cannot use AWS keys. */ destination: pulumi.Input<string>; } export interface ClusterClusterMountInfo { /** * path inside the Spark container. * * For example, you can mount Azure Data Lake Storage container using the following code: * * ```typescript * import * as pulumi from "@pulumi/pulumi"; * import * as databricks from "@pulumi/databricks"; * * const storageAccount = "ewfw3ggwegwg"; * const storageContainer = "test"; * const withNfs = new databricks.Cluster("with_nfs", {clusterMountInfos: [{ * networkFilesystemInfo: { * serverAddress: `${storageAccount}.blob.core.windows.net`, * mountOptions: "sec=sys,vers=3,nolock,proto=tcp", * }, * remoteMountDirPath: `${storageAccount}/${storageContainer}`, * localMountDirPath: "/mnt/nfs-test", * }]}); * ``` */ localMountDirPath: pulumi.Input<string>; /** * block specifying connection. It consists of: */ networkFilesystemInfo: pulumi.Input<inputs.ClusterClusterMountInfoNetworkFilesystemInfo>; /** * string specifying path to mount on the remote service. */ remoteMountDirPath?: pulumi.Input<string>; } export interface ClusterClusterMountInfoNetworkFilesystemInfo { /** * string that will be passed as options passed to the `mount` command. */ mountOptions?: pulumi.Input<string>; /** * host name. */ serverAddress: pulumi.Input<string>; } export interface ClusterDockerImage { /** * `basic_auth.username` and `basic_auth.password` for Docker repository. 
Docker registry credentials are encrypted when they are stored in Databricks internal storage and when they are passed to a registry upon fetching Docker images at cluster launch. However, other authenticated and authorized API users of this workspace can access the username and password. * * Example usage with azurermContainerRegistry and docker_registry_image, that you can adapt to your specific use-case: * * ```typescript * import * as pulumi from "@pulumi/pulumi"; * import * as databricks from "@pulumi/databricks"; * import * as docker from "@pulumi/docker"; * * const _this = new docker.index.RegistryImage("this", { * build: [{}], * name: `${thisAzurermContainerRegistry.loginServer}/sample:latest`, * }); * const thisCluster = new databricks.Cluster("this", {dockerImage: { * url: _this.name, * basicAuth: { * username: thisAzurermContainerRegistry.adminUsername, * password: thisAzurermContainerRegistry.adminPassword, * }, * }}); * ``` */ basicAuth?: pulumi.Input<inputs.ClusterDockerImageBasicAuth>; /** * URL for the Docker image */ url: pulumi.Input<string>; } export interface ClusterDockerImageBasicAuth { password: pulumi.Input<string>; username: pulumi.Input<string>; } export interface ClusterGcpAttributes { /** * Availability type used for all nodes. Valid values are `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP` and `ON_DEMAND_GCP`, default: `ON_DEMAND_GCP`. */ availability?: pulumi.Input<string>; /** * Boot disk size in GB */ bootDiskSize?: pulumi.Input<number>; /** * Google Service Account email address that the cluster uses to authenticate with Google Identity. This field is used for authentication with the GCS and BigQuery data sources. */ googleServiceAccount?: pulumi.Input<string>; /** * Number of local SSD disks (each is 375GB in size) that will be attached to each node of the cluster. 
 */
    localSsdCount?: pulumi.Input<number>;
    /**
     * if we should use preemptible executors ([GCP documentation](https://cloud.google.com/compute/docs/instances/preemptible)). *Warning: this field is deprecated in favor of `availability`, and will be removed soon.*
     */
    usePreemptibleExecutors?: pulumi.Input<boolean>;
    /**
     * Identifier for the availability zone in which the cluster resides. This can be one of the following:
     * * `HA` (default): High availability, spread nodes across availability zones for a Databricks deployment region.
     * * `AUTO`: Databricks picks an availability zone to schedule the cluster on.
     * * name of a GCP availability zone: pick one of the available zones from the [list of available availability zones](https://cloud.google.com/compute/docs/regions-zones#available).
     */
    zoneId?: pulumi.Input<string>;
}
export interface ClusterInitScript {
    abfss?: pulumi.Input<inputs.ClusterInitScriptAbfss>;
    /**
     * @deprecated For init scripts use 'volumes', 'workspace' or cloud storage location instead of 'dbfs'.
     */
    dbfs?: pulumi.Input<inputs.ClusterInitScriptDbfs>;
    file?: pulumi.Input<inputs.ClusterInitScriptFile>;
    gcs?: pulumi.Input<inputs.ClusterInitScriptGcs>;
    s3?: pulumi.Input<inputs.ClusterInitScriptS3>;
    volumes?: pulumi.Input<inputs.ClusterInitScriptVolumes>;
    workspace?: pulumi.Input<inputs.ClusterInitScriptWorkspace>;
}
export interface ClusterInitScriptAbfss {
    /**
     * ABFSS destination of the init script, e.g. `abfss://<container-name>@<storage-account-name>.dfs.core.windows.net/<directory-name>/init.sh`.
     */
    destination: pulumi.Input<string>;
}
export interface ClusterInitScriptDbfs {
    /**
     * DBFS destination of the init script, e.g. `dbfs:/my/init.sh`. DBFS init script locations are deprecated; prefer `volumes` or `workspace`.
 */
    destination: pulumi.Input<string>;
}
export interface ClusterInitScriptFile {
    /**
     * local file destination of the init script, e.g. `file:/my/local/file.sh`.
     */
    destination: pulumi.Input<string>;
}
export interface ClusterInitScriptGcs {
    /**
     * GCS destination/URI of the init script, e.g. `gs://my-bucket/init-scripts/my-init.sh`.
     */
    destination: pulumi.Input<string>;
}
export interface ClusterInitScriptS3 {
    /**
     * Set canned access control list, e.g. `bucket-owner-full-control`. If `cannedAcl` is set, the cluster instance profile must have `s3:PutObjectAcl` permission on the destination bucket and prefix. The full list of possible canned ACLs can be found [here](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl). By default, only the object owner gets full control. If you are using a cross-account role for writing data, you may want to set `bucket-owner-full-control` to make bucket owners able to read the logs.
     */
    cannedAcl?: pulumi.Input<string>;
    /**
     * S3 destination, e.g., `s3://my-bucket/some-prefix` You must configure the cluster with an instance profile, and the instance profile must have write access to the destination. You cannot use AWS keys.
     */
    destination: pulumi.Input<string>;
    /**
     * Enable server-side encryption, false by default.
     */
    enableEncryption?: pulumi.Input<boolean>;
    /**
     * The encryption type, it could be `sse-s3` or `sse-kms`. It is used only when encryption is enabled, and the default type is `sse-s3`.
     */
    encryptionType?: pulumi.Input<string>;
    /**
     * S3 endpoint, e.g. <https://s3-us-west-2.amazonaws.com>. Either `region` or `endpoint` needs to be set. If both are set, the endpoint is used.
 */
    endpoint?: pulumi.Input<string>;
    /**
     * KMS key used if encryption is enabled and encryption type is set to `sse-kms`.
     */
    kmsKey?: pulumi.Input<string>;
    /**
     * S3 region, e.g. `us-west-2`. Either `region` or `endpoint` must be set. If both are set, the endpoint is used.
     */
    region?: pulumi.Input<string>;
}
export interface ClusterInitScriptVolumes {
    /**
     * Unity Catalog Volumes destination of the init script, e.g. `/Volumes/catalog/schema/volume/init.sh`.
     */
    destination: pulumi.Input<string>;
}
export interface ClusterInitScriptWorkspace {
    /**
     * workspace file destination of the init script, e.g. `/Users/user@domain.com/install-elasticsearch.sh`.
     */
    destination: pulumi.Input<string>;
}
export interface ClusterLibrary {
    cran?: pulumi.Input<inputs.ClusterLibraryCran>;
    egg?: pulumi.Input<string>;
    jar?: pulumi.Input<string>;
    maven?: pulumi.Input<inputs.ClusterLibraryMaven>;
    pypi?: pulumi.Input<inputs.ClusterLibraryPypi>;
    requirements?: pulumi.Input<string>;
    whl?: pulumi.Input<string>;
}
export interface ClusterLibraryCran {
    package: pulumi.Input<string>;
    repo?: pulumi.Input<string>;
}
export interface ClusterLibraryMaven {
    coordinates: pulumi.Input<string>;
    exclusions?: pulumi.Input<pulumi.Input<string>[]>;
    repo?: pulumi.Input<string>;
}
export interface ClusterLibraryPypi {
    package: pulumi.Input<string>;
    repo?: pulumi.Input<string>;
}
export interface ClusterPolicyLibrary {
    cran?: pulumi.Input<inputs.ClusterPolicyLibraryCran>;
    egg?: pulumi.Input<string>;
    jar?: pulumi.Input<string>;
    maven?: pulumi.Input<inputs.ClusterPolicyLibraryMaven>;
    pypi?: pulumi.Input<inputs.ClusterPolicyLibraryPypi>;
    requirements?: pulumi.Input<string>;
    whl?: pulumi.Input<string>;
}
export interface ClusterPolicyLibraryCran {
    package: pulumi.Input<string>;
    repo?: pulumi.Input<string>;
}
export
interface ClusterPolicyLibraryMaven { coordinates: pulumi.Input<string>; exclusions?: pulumi.Input<pulumi.Input<string>[]>; repo?: pulumi.Input<string>; } export interface ClusterPolicyLibraryPypi { package: pulumi.Input<string>; repo?: pulumi.Input<string>; } export interface ClusterWorkloadType { clients: pulumi.Input<inputs.ClusterWorkloadTypeClients>; } export interface ClusterWorkloadTypeClients { /** * boolean flag defining if it's possible to run Databricks Jobs on this cluster. Default: `true`. * * ```typescript * import * as pulumi from "@pulumi/pulumi"; * import * as databricks from "@pulumi/databricks"; * * const withNfs = new databricks.Cluster("with_nfs", {workloadType: { * clients: { * jobs: false, * notebooks: true, * }, * }}); * ``` */ jobs?: pulumi.Input<boolean>; /** * boolean flag defining if it's possible to run notebooks on this cluster. Default: `true`. */ notebooks?: pulumi.Input<boolean>; } export interface ComplianceSecurityProfileWorkspaceSettingComplianceSecurityProfileWorkspace { complianceStandards: pulumi.Input<pulumi.Input<string>[]>; isEnabled: pulumi.Input<boolean>; } export interface ConnectionProvisioningInfo { state?: pulumi.Input<string>; } export interface CredentialAwsIamRole { externalId?: pulumi.Input<string>; /** * The Amazon Resource Name (ARN) of the AWS IAM role you want to use to setup the trust policy, of the form `arn:aws:iam::1234567890:role/MyRole-AJJHDSKSDF` * * `azureManagedIdentity` optional configuration block for using managed identity as credential details for Azure (recommended over `azureServicePrincipal`): */ roleArn?: pulumi.Input<string>; unityCatalogIamArn?: pulumi.Input<string>; } export interface CredentialAzureManagedIdentity { /** * The Resource ID of the Azure Databricks Access Connector resource, of the form `/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg-name/providers/Microsoft.Databricks/accessConnectors/connector-name`. 
*/ accessConnectorId: pulumi.Input<string>; /** * Unique ID of the credential. */ credentialId?: pulumi.Input<string>; /** * The Resource ID of the Azure User Assigned Managed Identity associated with Azure Databricks Access Connector, of the form `/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg-name/providers/Microsoft.ManagedIdentity/userAssignedIdentities/user-managed-identity-name`. * * `azureServicePrincipal` optional configuration block to use service principal as credential details for Azure. Only applicable when purpose is `STORAGE` (Legacy): */ managedIdentityId?: pulumi.Input<string>; } export interface CredentialAzureServicePrincipal { /** * The application ID of the application registration within the referenced AAD tenant */ applicationId: pulumi.Input<string>; /** * The client secret generated for the above app ID in AAD. **This field is redacted on output** * * `databricksGcpServiceAccount` optional configuration block for creating a Databricks-managed GCP Service Account: */ clientSecret: pulumi.Input<string>; /** * The directory ID corresponding to the Azure Active Directory (AAD) tenant of the application */ directoryId: pulumi.Input<string>; } export interface CredentialDatabricksGcpServiceAccount { /** * Unique ID of the credential. */ credentialId?: pulumi.Input<string>; /** * The email of the GCP service account created, to be granted access to relevant buckets. */ email?: pulumi.Input<string>; privateKeyId?: pulumi.Input<string>; } export interface CustomAppIntegrationTokenAccessPolicy { /** * access token time to live (TTL) in minutes. */ accessTokenTtlInMinutes?: pulumi.Input<number>; /** * refresh token TTL in minutes. The TTL of refresh token cannot be lower than TTL of access token. */ refreshTokenTtlInMinutes?: pulumi.Input<number>; } export interface DefaultNamespaceSettingNamespace { /** * The value for the setting. 
*/ value?: pulumi.Input<string>; } export interface DisableLegacyAccessSettingDisableLegacyAccess { value: pulumi.Input<boolean>; } export interface DisableLegacyDbfsSettingDisableLegacyDbfs { /** * The boolean value for the setting. */ value: pulumi.Input<boolean>; } export interface DisableLegacyFeaturesSettingDisableLegacyFeatures { /** * The boolean value for the setting. */ value: pulumi.Input<boolean>; } export interface EnhancedSecurityMonitoringWorkspaceSettingEnhancedSecurityMonitoringWorkspace { isEnabled: pulumi.Input<boolean>; } export interface ExternalLocationEncryptionDetails { sseEncryptionDetails?: pulumi.Input<inputs.ExternalLocationEncryptionDetailsSseEncryptionDetails>; } export interface ExternalLocationEncryptionDetailsSseEncryptionDetails { algorithm?: pulumi.Input<string>; awsKmsKeyArn?: pulumi.Input<string>; } export interface ExternalLocationFileEventQueue { /** * Configuration for managed Azure Queue Storage queue. */ managedAqs?: pulumi.Input<inputs.ExternalLocationFileEventQueueManagedAqs>; /** * Configuration for managed Google Cloud Pub/Sub queue. */ managedPubsub?: pulumi.Input<inputs.ExternalLocationFileEventQueueManagedPubsub>; /** * Configuration for managed Amazon SQS queue. */ managedSqs?: pulumi.Input<inputs.ExternalLocationFileEventQueueManagedSqs>; /** * Configuration for provided Azure Storage Queue. */ providedAqs?: pulumi.Input<inputs.ExternalLocationFileEventQueueProvidedAqs>; /** * Configuration for provided Google Cloud Pub/Sub queue. */ providedPubsub?: pulumi.Input<inputs.ExternalLocationFileEventQueueProvidedPubsub>; /** * Configuration for provided Amazon SQS queue. */ providedSqs?: pulumi.Input<inputs.ExternalLocationFileEventQueueProvidedSqs>; } export interface ExternalLocationFileEventQueueManagedAqs { /** * The ID of the managed resource. */ managedResourceId?: pulumi.Input<string>; queueUrl?: pulumi.Input<string>; /** * The Azure resource group. 
*/ resourceGroup: pulumi.Input<string>; /** * The Azure subscription ID. */ subscriptionId: pulumi.Input<string>; } export interface ExternalLocationFileEventQueueManagedPubsub { /** * The ID of the managed resource. */ managedResourceId?: pulumi.Input<string>; /** * The name of the subscription. */ subscriptionName?: pulumi.Input<string>; } export interface ExternalLocationFileEventQueueManagedSqs { /** * The ID of the managed resource. */ managedResourceId?: pulumi.Input<string>; queueUrl?: pulumi.Input<string>; } export interface ExternalLocationFileEventQueueProvidedAqs { managedResourceId?: pulumi.Input<string>; /** * The URL of the queue. */ queueUrl: pulumi.Input<string>; /** * The Azure resource group. */ resourceGroup?: pulumi.Input<string>; /** * The Azure subscription ID. */ subscriptionId?: pulumi.Input<string>; } export interface ExternalLocationFileEventQueueProvidedPubsub { managedResourceId?: pulumi.Input<string>; /** * The name of the subscription. */ subscriptionName: pulumi.Input<string>; } export interface ExternalLocationFileEventQueueProvidedSqs { managedResourceId?: pulumi.Input<string>; /** * The URL of the SQS queue. 
*/ queueUrl: pulumi.Input<string>; } export interface GetAccountNetworkPolicyEgress { /** * (EgressNetworkPolicyNetworkAccessPolicy) - The access policy enforced for egress traffic to the internet */ networkAccess?: inputs.GetAccountNetworkPolicyEgressNetworkAccess; } export interface GetAccountNetworkPolicyEgressArgs { /** * (EgressNetworkPolicyNetworkAccessPolicy) - The access policy enforced for egress traffic to the internet */ networkAccess?: pulumi.Input<inputs.GetAccountNetworkPolicyEgressNetworkAccessArgs>; } export interface GetAccountNetworkPolicyEgressNetworkAccess { /** * (list of EgressNetworkPolicyNetworkAccessPolicyInternetDestination) - List of internet destinations that serverless workloads are allowed to access when in RESTRICTED_ACCESS mode */ allowedInternetDestinations?: inputs.GetAccountNetworkPolicyEgressNetworkAccessAllowedInternetDestination[]; /** * (list of EgressNetworkPolicyNetworkAccessPolicyStorageDestination) - List of storage destinations that serverless workloads are allowed to access when in RESTRICTED_A