@pulumi/databricks
A Pulumi package for creating and managing databricks cloud resources.
import * as outputs from "../types/output";
export interface AccessControlRuleSetGrantRule {
/**
* a list of principals who are granted a role. The following format is supported:
* * `users/{username}` (also exposed as `aclPrincipalId` attribute of `databricks.User` resource).
* * `groups/{groupname}` (also exposed as `aclPrincipalId` attribute of `databricks.Group` resource).
* * `servicePrincipals/{applicationId}` (also exposed as `aclPrincipalId` attribute of `databricks.ServicePrincipal` resource).
*/
principals?: string[];
/**
* Role to be granted. The supported roles are listed below. For more information about these roles, refer to [service principal roles](https://docs.databricks.com/security/auth-authz/access-control/service-principal-acl.html#service-principal-roles), [group roles](https://docs.databricks.com/en/administration-guide/users-groups/groups.html#manage-roles-on-an-account-group-using-the-workspace-admin-settings-page), [marketplace roles](https://docs.databricks.com/en/marketplace/get-started-provider.html#assign-the-marketplace-admin-role) or [budget policy permissions](https://docs.databricks.com/aws/en/admin/usage/budget-policies#manage-budget-policy-permissions), depending on the `name` defined:
* * `accounts/{account_id}/ruleSets/default`
* * `roles/marketplace.admin` - Databricks Marketplace administrator.
* * `roles/billing.admin` - Billing administrator.
* * `accounts/{account_id}/servicePrincipals/{service_principal_application_id}/ruleSets/default`
* * `roles/servicePrincipal.manager` - Manager of a service principal.
* * `roles/servicePrincipal.user` - User of a service principal.
* * `accounts/{account_id}/groups/{group_id}/ruleSets/default`
* * `roles/group.manager` - Manager of a group.
* * `accounts/{account_id}/budgetPolicies/{budget_policy_id}/ruleSets/default`
* * `roles/budgetPolicy.manager` - Manager of a budget policy.
* * `roles/budgetPolicy.user` - User of a budget policy.
*/
role: string;
}
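/*
 * Example: the `principals` and `role` fields above are used inside `grantRules` of the
 * `databricks.AccessControlRuleSet` resource. The snippet below is a minimal sketch, assuming the
 * resource accepts `name` and `grantRules` arguments shaped like this interface; the account ID,
 * application ID, and group name are placeholders.
 *
 * ```typescript
 * import * as pulumi from "@pulumi/pulumi";
 * import * as databricks from "@pulumi/databricks";
 *
 * const accountId = "00000000-0000-0000-0000-000000000000"; // placeholder account ID
 * const spApplicationId = "11111111-1111-1111-1111-111111111111"; // placeholder application ID
 * const spUsers = new databricks.AccessControlRuleSet("sp_users", {
 *     name: `accounts/${accountId}/servicePrincipals/${spApplicationId}/ruleSets/default`,
 *     grantRules: [{
 *         principals: ["groups/Data Engineers"],
 *         role: "roles/servicePrincipal.user",
 *     }],
 * });
 * ```
 */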
export interface AibiDashboardEmbeddingAccessPolicySettingAibiDashboardEmbeddingAccessPolicy {
/**
* Configured embedding policy. Possible values are `ALLOW_ALL_DOMAINS`, `ALLOW_APPROVED_DOMAINS`, `DENY_ALL_DOMAINS`.
*/
accessPolicyType: string;
}
export interface AibiDashboardEmbeddingApprovedDomainsSettingAibiDashboardEmbeddingApprovedDomains {
/**
 * the list of approved domains. To allow all subdomains of a given domain, use a wildcard symbol (`*`) before the domain name, e.g., `*.databricks.com` will allow embedding into any site under the `databricks.com` domain.
*/
approvedDomains: string[];
}
export interface AlertCondition {
/**
* Alert state if the result is empty (`UNKNOWN`, `OK`, `TRIGGERED`)
*/
emptyResultState?: string;
/**
* Operator used for comparison in alert evaluation. (Enum: `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `LESS_THAN`, `LESS_THAN_OR_EQUAL`, `EQUAL`, `NOT_EQUAL`, `IS_NULL`)
*/
op: string;
/**
* Name of the column from the query result to use for comparison in alert evaluation:
*/
operand: outputs.AlertConditionOperand;
/**
* Threshold value used for comparison in alert evaluation:
*/
threshold?: outputs.AlertConditionThreshold;
}
export interface AlertConditionOperand {
/**
* Block describing the column from the query result to use for comparison in alert evaluation:
*/
column: outputs.AlertConditionOperandColumn;
}
export interface AlertConditionOperandColumn {
/**
* Name of the column.
*/
name: string;
}
export interface AlertConditionThreshold {
/**
* actual value used in comparison (one of the attributes is required):
*/
value: outputs.AlertConditionThresholdValue;
}
export interface AlertConditionThresholdValue {
/**
* boolean value (`true` or `false`) to compare against boolean results.
*/
boolValue?: boolean;
/**
* double value to compare against integer and double results.
*/
doubleValue?: number;
/**
* string value to compare against string results.
*/
stringValue?: string;
}
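/*
 * Example: the `AlertCondition*` shapes above describe the `condition` argument of the
 * `databricks.Alert` resource. The snippet below is a hedged sketch, assuming the resource takes
 * `queryId`, `displayName`, `parentPath`, and `condition` arguments; the query ID and workspace
 * path are placeholders.
 *
 * ```typescript
 * import * as pulumi from "@pulumi/pulumi";
 * import * as databricks from "@pulumi/databricks";
 *
 * const highErrorRate = new databricks.Alert("high_error_rate", {
 *     queryId: "12345678-1234-1234-1234-123456789012", // placeholder ID of an existing query
 *     displayName: "High error rate",
 *     parentPath: "/Workspace/Alerts", // placeholder workspace folder
 *     condition: {
 *         op: "GREATER_THAN",
 *         operand: {
 *             column: { name: "error_rate" },
 *         },
 *         threshold: {
 *             value: { doubleValue: 0.05 },
 *         },
 *         emptyResultState: "OK",
 *     },
 * });
 * ```
 */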
export interface AppActiveDeployment {
/**
* The creation time of the app.
*/
createTime: string;
/**
* The email of the user that created the app.
*/
creator: string;
deploymentArtifacts: outputs.AppActiveDeploymentDeploymentArtifacts;
deploymentId?: string;
mode?: string;
sourceCodePath?: string;
status: outputs.AppActiveDeploymentStatus;
/**
* The update time of the app.
*/
updateTime: string;
}
export interface AppActiveDeploymentDeploymentArtifacts {
sourceCodePath?: string;
}
export interface AppActiveDeploymentStatus {
/**
* Application status message
*/
message: string;
/**
* State of the application.
*/
state: string;
}
export interface AppAppStatus {
/**
* Application status message
*/
message: string;
/**
* State of the application.
*/
state: string;
}
export interface AppComputeStatus {
/**
* Application status message
*/
message: string;
/**
* State of the application.
*/
state: string;
}
export interface AppPendingDeployment {
/**
* The creation time of the app.
*/
createTime: string;
/**
* The email of the user that created the app.
*/
creator: string;
deploymentArtifacts: outputs.AppPendingDeploymentDeploymentArtifacts;
deploymentId?: string;
mode?: string;
sourceCodePath?: string;
status: outputs.AppPendingDeploymentStatus;
/**
* The update time of the app.
*/
updateTime: string;
}
export interface AppPendingDeploymentDeploymentArtifacts {
sourceCodePath?: string;
}
export interface AppPendingDeploymentStatus {
/**
* Application status message
*/
message: string;
/**
* State of the application.
*/
state: string;
}
export interface AppResource {
/**
* The description of the resource.
*
* Exactly one of the following attributes must be provided:
*/
description?: string;
/**
* attribute
*/
job?: outputs.AppResourceJob;
/**
* The name of the resource.
*/
name: string;
/**
* attribute
*/
secret?: outputs.AppResourceSecret;
/**
* attribute
*/
servingEndpoint?: outputs.AppResourceServingEndpoint;
/**
* attribute
*/
sqlWarehouse?: outputs.AppResourceSqlWarehouse;
}
export interface AppResourceJob {
/**
* Id of the job to grant permission on.
*/
id: string;
/**
* Permissions to grant on the Job. Supported permissions are: `CAN_MANAGE`, `IS_OWNER`, `CAN_MANAGE_RUN`, `CAN_VIEW`.
*/
permission: string;
}
export interface AppResourceSecret {
/**
* Key of the secret to grant permission on.
*/
key: string;
/**
* Permission to grant on the secret scope. For secrets, only one permission is allowed. Permission must be one of: `READ`, `WRITE`, `MANAGE`.
*/
permission: string;
/**
* Scope of the secret to grant permission on.
*/
scope: string;
}
export interface AppResourceServingEndpoint {
/**
* Name of the serving endpoint to grant permission on.
*/
name: string;
/**
* Permission to grant on the serving endpoint. Supported permissions are: `CAN_MANAGE`, `CAN_QUERY`, `CAN_VIEW`.
*/
permission: string;
}
export interface AppResourceSqlWarehouse {
/**
* Id of the SQL warehouse to grant permission on.
*/
id: string;
/**
* Permission to grant on the SQL warehouse. Supported permissions are: `CAN_MANAGE`, `CAN_USE`, `IS_OWNER`.
*/
permission: string;
}
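/*
 * Example: `AppResource` and its nested blocks describe the `resources` argument of the
 * `databricks.App` resource. The snippet below is a hedged sketch, assuming the resource accepts
 * `name`, `description`, and `resources` arguments; the warehouse ID and secret scope/key are
 * placeholders.
 *
 * ```typescript
 * import * as pulumi from "@pulumi/pulumi";
 * import * as databricks from "@pulumi/databricks";
 *
 * const analyticsApp = new databricks.App("analytics_app", {
 *     name: "analytics-app",
 *     description: "Dashboard app backed by a SQL warehouse",
 *     resources: [
 *         {
 *             name: "warehouse",
 *             sqlWarehouse: {
 *                 id: "1234567890abcdef", // placeholder SQL warehouse ID
 *                 permission: "CAN_USE",
 *             },
 *         },
 *         {
 *             name: "api-token",
 *             secret: {
 *                 scope: "app-secrets", // placeholder secret scope
 *                 key: "api-token",     // placeholder secret key
 *                 permission: "READ",
 *             },
 *         },
 *     ],
 * });
 * ```
 */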
export interface ArtifactAllowlistArtifactMatcher {
/**
* The artifact path or maven coordinate.
*/
artifact: string;
/**
* The pattern matching type of the artifact. Only `PREFIX_MATCH` is supported.
*/
matchType: string;
}
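/*
 * Example: `ArtifactAllowlistArtifactMatcher` entries populate the `artifactMatchers` argument of
 * the `databricks.ArtifactAllowlist` resource. A hedged sketch, assuming the resource takes
 * `artifactType` and `artifactMatchers` arguments; the volume path is a placeholder.
 *
 * ```typescript
 * import * as pulumi from "@pulumi/pulumi";
 * import * as databricks from "@pulumi/databricks";
 *
 * const initScriptAllowlist = new databricks.ArtifactAllowlist("init_scripts", {
 *     artifactType: "INIT_SCRIPT",
 *     artifactMatchers: [{
 *         artifact: "/Volumes/main/default/scripts", // placeholder Unity Catalog volume path
 *         matchType: "PREFIX_MATCH",
 *     }],
 * });
 * ```
 */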
export interface AutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspace {
canToggle?: boolean;
enabled: boolean;
enablementDetails: outputs.AutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceEnablementDetails;
maintenanceWindow?: outputs.AutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceMaintenanceWindow;
restartEvenIfNoUpdatesAvailable?: boolean;
}
export interface AutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceEnablementDetails {
forcedForComplianceMode?: boolean;
unavailableForDisabledEntitlement?: boolean;
unavailableForNonEnterpriseTier?: boolean;
}
export interface AutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceMaintenanceWindow {
weekDayBasedSchedule?: outputs.AutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceMaintenanceWindowWeekDayBasedSchedule;
}
export interface AutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceMaintenanceWindowWeekDayBasedSchedule {
dayOfWeek: string;
frequency: string;
windowStartTime?: outputs.AutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceMaintenanceWindowWeekDayBasedScheduleWindowStartTime;
}
export interface AutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceMaintenanceWindowWeekDayBasedScheduleWindowStartTime {
hours: number;
minutes: number;
}
export interface BudgetAlertConfiguration {
/**
* List of action configurations to take when the budget alert is triggered. Consists of the following fields:
*/
actionConfigurations?: outputs.BudgetAlertConfigurationActionConfiguration[];
alertConfigurationId: string;
/**
* The threshold for the budget alert to determine if it is in a triggered state. The number is evaluated based on `quantityType`.
*/
quantityThreshold?: string;
/**
 * The way to calculate cost for this budget alert. This is what `quantityThreshold` is measured in. (Enum: `LIST_PRICE_DOLLARS_USD`)
*/
quantityType?: string;
/**
* The time window of usage data for the budget. (Enum: `MONTH`)
*/
timePeriod?: string;
/**
* The evaluation method to determine when this budget alert is in a triggered state. (Enum: `CUMULATIVE_SPENDING_EXCEEDED`)
*/
triggerType?: string;
}
export interface BudgetAlertConfigurationActionConfiguration {
actionConfigurationId: string;
/**
* The type of action to take when the budget alert is triggered. (Enum: `EMAIL_NOTIFICATION`)
*/
actionType?: string;
/**
* The target of the action. For `EMAIL_NOTIFICATION`, this is the email address to send the notification to.
*/
target?: string;
}
export interface BudgetFilter {
/**
* List of tags to filter by. Consists of the following fields:
*/
tags?: outputs.BudgetFilterTag[];
/**
 * Filter by workspace ID (if empty, include all usage for this account). Consists of the following fields:
*/
workspaceId?: outputs.BudgetFilterWorkspaceId;
}
export interface BudgetFilterTag {
/**
* The key of the tag.
*/
key?: string;
/**
* Consists of the following fields:
*/
value?: outputs.BudgetFilterTagValue;
}
export interface BudgetFilterTagValue {
/**
* The operator to use for the filter. (Enum: `IN`)
*/
operator?: string;
/**
* The values to filter by.
*/
values?: string[];
}
export interface BudgetFilterWorkspaceId {
/**
* The operator to use for the filter. (Enum: `IN`)
*/
operator?: string;
/**
* The values to filter by.
*/
values?: number[];
}
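/*
 * Example: `BudgetAlertConfiguration` and `BudgetFilter` map to the `alertConfigurations` and
 * `filter` arguments of the `databricks.Budget` resource. A hedged sketch, assuming those argument
 * names; the workspace ID, tag values, and email address are placeholders.
 *
 * ```typescript
 * import * as pulumi from "@pulumi/pulumi";
 * import * as databricks from "@pulumi/databricks";
 *
 * const monthlyBudget = new databricks.Budget("monthly", {
 *     displayName: "Monthly platform spend",
 *     alertConfigurations: [{
 *         timePeriod: "MONTH",
 *         triggerType: "CUMULATIVE_SPENDING_EXCEEDED",
 *         quantityType: "LIST_PRICE_DOLLARS_USD",
 *         quantityThreshold: "5000",
 *         actionConfigurations: [{
 *             actionType: "EMAIL_NOTIFICATION",
 *             target: "finops@example.com", // placeholder notification address
 *         }],
 *     }],
 *     filter: {
 *         workspaceId: {
 *             operator: "IN",
 *             values: [1234567890], // placeholder workspace ID
 *         },
 *         tags: [{
 *             key: "team",
 *             value: { operator: "IN", values: ["data-platform"] }, // placeholder tag value
 *         }],
 *     },
 * });
 * ```
 */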
export interface BudgetPolicyCustomTag {
/**
 * The key of the tag. Must be unique among all custom tags of the same policy. Cannot be `budget-policy-name`, `budget-policy-id` or `budget-policy-resolution-result`, as these tags are preserved.
*/
key: string;
/**
* The value of the tag.
*/
value?: string;
}
export interface ClusterAutoscale {
/**
 * The maximum number of workers to which the cluster can scale up when overloaded. `maxWorkers` must be strictly greater than `minWorkers`.
*
* When using a [Single Node cluster](https://docs.databricks.com/clusters/single-node.html), `numWorkers` needs to be `0`. It can be set to `0` explicitly, or simply not specified, as it defaults to `0`. When `numWorkers` is `0`, provider checks for presence of the required Spark configurations:
*
* * `spark.master` must have prefix `local`, like `local[*]`
* * `spark.databricks.cluster.profile` must have value `singleNode`
*
* and also `customTag` entry:
*
* * `"ResourceClass" = "SingleNode"`
*
 * The following example demonstrates how to create a single node cluster:
*
* ```typescript
* import * as pulumi from "@pulumi/pulumi";
* import * as databricks from "@pulumi/databricks";
*
* const smallest = databricks.getNodeType({
* localDisk: true,
* });
* const latestLts = databricks.getSparkVersion({
* longTermSupport: true,
* });
* const singleNode = new databricks.Cluster("single_node", {
* clusterName: "Single Node",
* sparkVersion: latestLts.then(latestLts => latestLts.id),
* nodeTypeId: smallest.then(smallest => smallest.id),
* autoterminationMinutes: 20,
* sparkConf: {
* "spark.databricks.cluster.profile": "singleNode",
* "spark.master": "local[*]",
* },
* customTags: {
* ResourceClass: "SingleNode",
* },
* });
* ```
*/
maxWorkers?: number;
/**
* The minimum number of workers to which the cluster can scale down when underutilized. It is also the initial number of workers the cluster will have after creation.
*/
minWorkers?: number;
}
export interface ClusterAwsAttributes {
/**
 * Availability type used for all subsequent nodes past the `firstOnDemand` ones. Valid values are `SPOT`, `SPOT_WITH_FALLBACK` and `ON_DEMAND`. Note: If `firstOnDemand` is zero, this availability type will be used for the entire cluster. Backend default value is `SPOT_WITH_FALLBACK` and could change in the future.
*/
availability?: string;
/**
 * The number of volumes launched for each instance. You can choose up to 10 volumes. This feature is only enabled for supported node types. Legacy node types cannot specify custom EBS volumes. For node types with no instance store, at least one EBS volume needs to be specified; otherwise, cluster creation will fail. These EBS volumes will be mounted at `/ebs0`, `/ebs1`, etc. Instance store volumes will be mounted at `/local_disk0`, `/local_disk1`, etc. If EBS volumes are attached, Databricks will configure Spark to use only the EBS volumes for scratch storage because heterogeneously sized scratch devices can lead to inefficient disk utilization. If no EBS volumes are attached, Databricks will configure Spark to use instance store volumes. If EBS volumes are specified, then the Spark configuration `spark.local.dir` will be overridden.
*/
ebsVolumeCount?: number;
ebsVolumeIops?: number;
/**
* The size of each EBS volume (in GiB) launched for each instance. For general purpose SSD, this value must be within the range 100 - 4096. For throughput optimized HDD, this value must be within the range 500 - 4096. Custom EBS volumes cannot be specified for the legacy node types (memory-optimized and compute-optimized).
*/
ebsVolumeSize?: number;
ebsVolumeThroughput?: number;
/**
* The type of EBS volumes that will be launched with this cluster. Valid values are `GENERAL_PURPOSE_SSD` or `THROUGHPUT_OPTIMIZED_HDD`. Use this option only if you're not picking *Delta Optimized `i3.*`* node types.
*/
ebsVolumeType?: string;
/**
 * The first `firstOnDemand` nodes of the cluster will be placed on on-demand instances. If this value is greater than 0, the cluster driver node will be placed on an on-demand instance. If this value is greater than or equal to the current cluster size, all nodes will be placed on on-demand instances. If this value is less than the current cluster size, `firstOnDemand` nodes will be placed on on-demand instances, and the remainder will be placed on instances of the configured `availability` type. This value does not affect cluster size and cannot be mutated over the lifetime of a cluster. If unspecified, the default value is 0.
*/
firstOnDemand?: number;
/**
* Nodes for this cluster will only be placed on AWS instances with this instance profile. Please see databricks.InstanceProfile resource documentation for extended examples on adding a valid instance profile using Pulumi.
*/
instanceProfileArn?: string;
/**
* The max price for AWS spot instances, as a percentage of the corresponding instance type’s on-demand price. For example, if this field is set to 50, and the cluster needs a new `i3.xlarge` spot instance, then the max price is half of the price of on-demand `i3.xlarge` instances. Similarly, if this field is set to 200, the max price is twice the price of on-demand `i3.xlarge` instances. If not specified, the default value is `100`. When spot instances are requested for this cluster, only spot instances whose max price percentage matches this field will be considered. For safety, we enforce this field to be no more than `10000`.
*/
spotBidPricePercent?: number;
/**
* Identifier for the availability zone/datacenter in which the cluster resides. This string will be of a form like `us-west-2a`. The provided availability zone must be in the same region as the Databricks deployment. For example, `us-west-2a` is not a valid zone ID if the Databricks deployment resides in the `us-east-1` region. Enable automatic availability zone selection ("Auto-AZ"), by setting the value `auto`. Databricks selects the AZ based on available IPs in the workspace subnets and retries in other availability zones if AWS returns insufficient capacity errors.
*/
zoneId?: string;
}
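/*
 * Example: `ClusterAwsAttributes` is set via the `awsAttributes` argument of `databricks.Cluster`.
 * A hedged sketch of a mostly-spot cluster; the Spark version and node type are placeholders (in
 * practice, resolve them with `databricks.getSparkVersion` and `databricks.getNodeType` as in the
 * single node example earlier in this file).
 *
 * ```typescript
 * import * as pulumi from "@pulumi/pulumi";
 * import * as databricks from "@pulumi/databricks";
 *
 * const mostlySpot = new databricks.Cluster("mostly_spot", {
 *     clusterName: "Mostly Spot",
 *     sparkVersion: "15.4.x-scala2.12", // placeholder
 *     nodeTypeId: "i3.xlarge",          // placeholder
 *     autoscale: { minWorkers: 1, maxWorkers: 8 },
 *     autoterminationMinutes: 30,
 *     awsAttributes: {
 *         availability: "SPOT_WITH_FALLBACK",
 *         firstOnDemand: 1, // keep the driver on an on-demand instance
 *         spotBidPricePercent: 100,
 *         zoneId: "auto",   // let Databricks pick the availability zone
 *         ebsVolumeCount: 1,
 *         ebsVolumeType: "GENERAL_PURPOSE_SSD",
 *         ebsVolumeSize: 100,
 *     },
 * });
 * ```
 */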
export interface ClusterAzureAttributes {
/**
* Availability type used for all subsequent nodes past the `firstOnDemand` ones. Valid values are `SPOT_AZURE`, `SPOT_WITH_FALLBACK_AZURE`, and `ON_DEMAND_AZURE`. Note: If `firstOnDemand` is zero, this availability type will be used for the entire cluster.
*/
availability?: string;
/**
 * The first `firstOnDemand` nodes of the cluster will be placed on on-demand instances. If this value is greater than 0, the cluster driver node will be placed on an on-demand instance. If this value is greater than or equal to the current cluster size, all nodes will be placed on on-demand instances. If this value is less than the current cluster size, `firstOnDemand` nodes will be placed on on-demand instances, and the remainder will be placed on instances of the configured `availability` type. This value does not affect cluster size and cannot be mutated over the lifetime of a cluster.
*/
firstOnDemand?: number;
logAnalyticsInfo?: outputs.ClusterAzureAttributesLogAnalyticsInfo;
/**
* The max bid price used for Azure spot instances. You can set this to greater than or equal to the current spot price. You can also set this to `-1`, which specifies that the instance cannot be evicted on the basis of price. The price for the instance will be the current price for spot instances or the price for a standard instance.
*/
spotBidMaxPrice?: number;
}
export interface ClusterAzureAttributesLogAnalyticsInfo {
logAnalyticsPrimaryKey?: string;
logAnalyticsWorkspaceId?: string;
}
export interface ClusterClusterLogConf {
dbfs?: outputs.ClusterClusterLogConfDbfs;
s3?: outputs.ClusterClusterLogConfS3;
volumes?: outputs.ClusterClusterLogConfVolumes;
}
export interface ClusterClusterLogConfDbfs {
/**
 * DBFS destination for cluster logs, e.g. `dbfs:/cluster-logs`.
*/
destination: string;
}
export interface ClusterClusterLogConfS3 {
/**
 * Set canned access control list, e.g. `bucket-owner-full-control`. If `cannedAcl` is set, the cluster instance profile must have `s3:PutObjectAcl` permission on the destination bucket and prefix. The full list of possible canned ACLs can be found [here](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl). By default, only the object owner gets full control. If you are using a cross-account role for writing data, you may want to set `bucket-owner-full-control` to make bucket owners able to read the logs.
*/
cannedAcl?: string;
/**
* S3 destination, e.g., `s3://my-bucket/some-prefix` You must configure the cluster with an instance profile, and the instance profile must have write access to the destination. You cannot use AWS keys.
*/
destination: string;
/**
* Enable server-side encryption, false by default.
*/
enableEncryption?: boolean;
/**
 * The encryption type; it can be `sse-s3` or `sse-kms`. It is used only when encryption is enabled, and the default type is `sse-s3`.
*/
encryptionType?: string;
/**
* S3 endpoint, e.g. <https://s3-us-west-2.amazonaws.com>. Either `region` or `endpoint` needs to be set. If both are set, the endpoint is used.
*/
endpoint?: string;
/**
* KMS key used if encryption is enabled and encryption type is set to `sse-kms`.
*/
kmsKey?: string;
/**
* S3 region, e.g. `us-west-2`. Either `region` or `endpoint` must be set. If both are set, the endpoint is used.
*/
region?: string;
}
export interface ClusterClusterLogConfVolumes {
/**
 * Unity Catalog Volumes destination for cluster logs, e.g. `/Volumes/catalog/schema/volume/cluster-logs`.
*/
destination: string;
}
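/*
 * Example: `ClusterClusterLogConf` is set via the `clusterLogConf` argument of `databricks.Cluster`
 * to deliver driver and worker logs to a single destination. A hedged sketch using the `s3` block;
 * the bucket, Spark version, and node type are placeholders.
 *
 * ```typescript
 * import * as pulumi from "@pulumi/pulumi";
 * import * as databricks from "@pulumi/databricks";
 *
 * const logged = new databricks.Cluster("logged", {
 *     clusterName: "With Log Delivery",
 *     sparkVersion: "15.4.x-scala2.12", // placeholder
 *     nodeTypeId: "i3.xlarge",          // placeholder
 *     numWorkers: 2,
 *     clusterLogConf: {
 *         s3: {
 *             destination: "s3://my-bucket/cluster-logs", // placeholder bucket/prefix
 *             region: "us-west-2",
 *             cannedAcl: "bucket-owner-full-control",
 *             enableEncryption: true,
 *         },
 *     },
 * });
 * ```
 */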
export interface ClusterClusterMountInfo {
/**
* path inside the Spark container.
*
* For example, you can mount Azure Data Lake Storage container using the following code:
*
* ```typescript
* import * as pulumi from "@pulumi/pulumi";
* import * as databricks from "@pulumi/databricks";
*
* const storageAccount = "ewfw3ggwegwg";
* const storageContainer = "test";
* const withNfs = new databricks.Cluster("with_nfs", {clusterMountInfos: [{
* networkFilesystemInfo: {
* serverAddress: `${storageAccount}.blob.core.windows.net`,
* mountOptions: "sec=sys,vers=3,nolock,proto=tcp",
* },
* remoteMountDirPath: `${storageAccount}/${storageContainer}`,
* localMountDirPath: "/mnt/nfs-test",
* }]});
* ```
*/
localMountDirPath: string;
/**
* block specifying connection. It consists of:
*/
networkFilesystemInfo: outputs.ClusterClusterMountInfoNetworkFilesystemInfo;
/**
* string specifying path to mount on the remote service.
*/
remoteMountDirPath?: string;
}
export interface ClusterClusterMountInfoNetworkFilesystemInfo {
/**
 * string of options that will be passed to the `mount` command.
*/
mountOptions?: string;
/**
* host name.
*/
serverAddress: string;
}
export interface ClusterDockerImage {
/**
* `basic_auth.username` and `basic_auth.password` for Docker repository. Docker registry credentials are encrypted when they are stored in Databricks internal storage and when they are passed to a registry upon fetching Docker images at cluster launch. However, other authenticated and authorized API users of this workspace can access the username and password.
*
 * Example usage with azurermContainerRegistry and docker_registry_image, which you can adapt to your specific use case:
*
* ```typescript
* import * as pulumi from "@pulumi/pulumi";
* import * as databricks from "@pulumi/databricks";
* import * as docker from "@pulumi/docker";
*
* const _this = new docker.index.RegistryImage("this", {
* build: [{}],
* name: `${thisAzurermContainerRegistry.loginServer}/sample:latest`,
* });
* const thisCluster = new databricks.Cluster("this", {dockerImage: {
* url: _this.name,
* basicAuth: {
* username: thisAzurermContainerRegistry.adminUsername,
* password: thisAzurermContainerRegistry.adminPassword,
* },
* }});
* ```
*/
basicAuth?: outputs.ClusterDockerImageBasicAuth;
/**
* URL for the Docker image
*/
url: string;
}
export interface ClusterDockerImageBasicAuth {
password: string;
username: string;
}
export interface ClusterGcpAttributes {
/**
* Availability type used for all nodes. Valid values are `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP` and `ON_DEMAND_GCP`, default: `ON_DEMAND_GCP`.
*/
availability?: string;
/**
* Boot disk size in GB
*/
bootDiskSize?: number;
/**
* Google Service Account email address that the cluster uses to authenticate with Google Identity. This field is used for authentication with the GCS and BigQuery data sources.
*/
googleServiceAccount?: string;
/**
* Number of local SSD disks (each is 375GB in size) that will be attached to each node of the cluster.
*/
localSsdCount?: number;
/**
* if we should use preemptible executors ([GCP documentation](https://cloud.google.com/compute/docs/instances/preemptible)). *Warning: this field is deprecated in favor of `availability`, and will be removed soon.*
*/
usePreemptibleExecutors?: boolean;
/**
* Identifier for the availability zone in which the cluster resides. This can be one of the following:
* * `HA` (default): High availability, spread nodes across availability zones for a Databricks deployment region.
* * `AUTO`: Databricks picks an availability zone to schedule the cluster on.
* * name of a GCP availability zone: pick one of the available zones from the [list of available availability zones](https://cloud.google.com/compute/docs/regions-zones#available).
*/
zoneId?: string;
}
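/*
 * Example: `ClusterGcpAttributes` is set via the `gcpAttributes` argument of `databricks.Cluster`.
 * A hedged sketch; the Spark version, node type, and service account email are placeholders.
 *
 * ```typescript
 * import * as pulumi from "@pulumi/pulumi";
 * import * as databricks from "@pulumi/databricks";
 *
 * const gcpCluster = new databricks.Cluster("gcp_cluster", {
 *     clusterName: "Preemptible GCP",
 *     sparkVersion: "15.4.x-scala2.12", // placeholder
 *     nodeTypeId: "n2-standard-4",      // placeholder
 *     numWorkers: 2,
 *     gcpAttributes: {
 *         availability: "PREEMPTIBLE_WITH_FALLBACK_GCP",
 *         zoneId: "AUTO",
 *         googleServiceAccount: "cluster-sa@my-project.iam.gserviceaccount.com", // placeholder
 *         localSsdCount: 1,
 *     },
 * });
 * ```
 */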
export interface ClusterInitScript {
abfss?: outputs.ClusterInitScriptAbfss;
/**
* @deprecated For init scripts use 'volumes', 'workspace' or cloud storage location instead of 'dbfs'.
*/
dbfs?: outputs.ClusterInitScriptDbfs;
file?: outputs.ClusterInitScriptFile;
gcs?: outputs.ClusterInitScriptGcs;
s3?: outputs.ClusterInitScriptS3;
volumes?: outputs.ClusterInitScriptVolumes;
workspace?: outputs.ClusterInitScriptWorkspace;
}
export interface ClusterInitScriptAbfss {
/**
 * ABFSS destination for the init script, e.g. `abfss://container@storageaccount.dfs.core.windows.net/init-scripts/install.sh`.
*/
destination: string;
}
export interface ClusterInitScriptDbfs {
/**
 * DBFS destination for the init script, e.g. `dbfs:/init-scripts/install.sh`.
*/
destination: string;
}
export interface ClusterInitScriptFile {
/**
 * Local file destination for the init script, e.g. `file:///tmp/install.sh`.
*/
destination: string;
}
export interface ClusterInitScriptGcs {
/**
 * GCS destination for the init script, e.g. `gs://my-bucket/init-scripts/install.sh`.
*/
destination: string;
}
export interface ClusterInitScriptS3 {
/**
 * Set canned access control list, e.g. `bucket-owner-full-control`. If `cannedAcl` is set, the cluster instance profile must have `s3:PutObjectAcl` permission on the destination bucket and prefix. The full list of possible canned ACLs can be found [here](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl). By default, only the object owner gets full control. If you are using a cross-account role for writing data, you may want to set `bucket-owner-full-control` to make bucket owners able to read the logs.
*/
cannedAcl?: string;
/**
* S3 destination, e.g., `s3://my-bucket/some-prefix` You must configure the cluster with an instance profile, and the instance profile must have write access to the destination. You cannot use AWS keys.
*/
destination: string;
/**
* Enable server-side encryption, false by default.
*/
enableEncryption?: boolean;
/**
 * The encryption type; it can be `sse-s3` or `sse-kms`. It is used only when encryption is enabled, and the default type is `sse-s3`.
*/
encryptionType?: string;
/**
* S3 endpoint, e.g. <https://s3-us-west-2.amazonaws.com>. Either `region` or `endpoint` needs to be set. If both are set, the endpoint is used.
*/
endpoint?: string;
/**
* KMS key used if encryption is enabled and encryption type is set to `sse-kms`.
*/
kmsKey?: string;
/**
* S3 region, e.g. `us-west-2`. Either `region` or `endpoint` must be set. If both are set, the endpoint is used.
*/
region?: string;
}
export interface ClusterInitScriptVolumes {
/**
 * Unity Catalog Volumes destination for the init script, e.g. `/Volumes/catalog/schema/volume/install.sh`.
*/
destination: string;
}
export interface ClusterInitScriptWorkspace {
/**
 * Workspace file destination for the init script, e.g. `/Users/user@domain.com/install.sh`.
*/
destination: string;
}
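/*
 * Example: `ClusterInitScript` blocks populate the `initScripts` argument of `databricks.Cluster`;
 * each entry picks exactly one location type (`workspace`, `volumes`, `abfss`, `gcs`, `s3`, `file`,
 * or the deprecated `dbfs`). A hedged sketch; all paths, the Spark version, and the node type are
 * placeholders.
 *
 * ```typescript
 * import * as pulumi from "@pulumi/pulumi";
 * import * as databricks from "@pulumi/databricks";
 *
 * const withInitScripts = new databricks.Cluster("with_init_scripts", {
 *     clusterName: "With Init Scripts",
 *     sparkVersion: "15.4.x-scala2.12", // placeholder
 *     nodeTypeId: "i3.xlarge",          // placeholder
 *     numWorkers: 2,
 *     initScripts: [
 *         { workspace: { destination: "/Users/someone@example.com/install-deps.sh" } }, // placeholder path
 *         { volumes: { destination: "/Volumes/main/default/scripts/configure.sh" } },   // placeholder path
 *     ],
 * });
 * ```
 */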
export interface ClusterLibrary {
cran?: outputs.ClusterLibraryCran;
egg?: string;
jar?: string;
maven?: outputs.ClusterLibraryMaven;
pypi?: outputs.ClusterLibraryPypi;
requirements?: string;
whl?: string;
}
export interface ClusterLibraryCran {
package: string;
repo?: string;
}
export interface ClusterLibraryMaven {
coordinates: string;
exclusions?: string[];
repo?: string;
}
export interface ClusterLibraryPypi {
package: string;
repo?: string;
}
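/*
 * Example: `ClusterLibrary` entries populate the `libraries` argument of `databricks.Cluster`. A
 * hedged sketch installing a PyPI package, a Maven coordinate, and a wheel from a volume; the
 * package name, coordinate, and path are placeholders.
 *
 * ```typescript
 * import * as pulumi from "@pulumi/pulumi";
 * import * as databricks from "@pulumi/databricks";
 *
 * const withLibraries = new databricks.Cluster("with_libraries", {
 *     clusterName: "With Libraries",
 *     sparkVersion: "15.4.x-scala2.12", // placeholder
 *     nodeTypeId: "i3.xlarge",          // placeholder
 *     numWorkers: 2,
 *     libraries: [
 *         { pypi: { package: "prophet==1.1.5" } },                              // placeholder package
 *         { maven: { coordinates: "com.amazon.deequ:deequ:2.0.7-spark-3.5" } }, // placeholder coordinate
 *         { whl: "/Volumes/main/default/libs/my_lib-0.1-py3-none-any.whl" },    // placeholder path
 *     ],
 * });
 * ```
 */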
export interface ClusterPolicyLibrary {
cran?: outputs.ClusterPolicyLibraryCran;
egg?: string;
jar?: string;
maven?: outputs.ClusterPolicyLibraryMaven;
pypi?: outputs.ClusterPolicyLibraryPypi;
requirements?: string;
whl?: string;
}
export interface ClusterPolicyLibraryCran {
package: string;
repo?: string;
}
export interface ClusterPolicyLibraryMaven {
coordinates: string;
exclusions?: string[];
repo?: string;
}
export interface ClusterPolicyLibraryPypi {
package: string;
repo?: string;
}
export interface ClusterWorkloadType {
clients: outputs.ClusterWorkloadTypeClients;
}
export interface ClusterWorkloadTypeClients {
/**
* boolean flag defining if it's possible to run Databricks Jobs on this cluster. Default: `true`.
*
* ```typescript
* import * as pulumi from "@pulumi/pulumi";
* import * as databricks from "@pulumi/databricks";
*
* const withNfs = new databricks.Cluster("with_nfs", {workloadType: {
* clients: {
* jobs: false,
* notebooks: true,
* },
* }});
* ```
*/
jobs?: boolean;
/**
* boolean flag defining if it's possible to run notebooks on this cluster. Default: `true`.
*/
notebooks?: boolean;
}
export interface ComplianceSecurityProfileWorkspaceSettingComplianceSecurityProfileWorkspace {
complianceStandards: string[];
isEnabled: boolean;
}
export interface ConnectionProvisioningInfo {
state?: string;
}
export interface CredentialAwsIamRole {
externalId: string;
/**
 * The Amazon Resource Name (ARN) of the AWS IAM role you want to use to set up the trust policy, of the form `arn:aws:iam::1234567890:role/MyRole-AJJHDSKSDF`
*
* `azureManagedIdentity` optional configuration block for using managed identity as credential details for Azure (recommended over `azureServicePrincipal`):
*/
roleArn?: string;
unityCatalogIamArn: string;
}
export interface CredentialAzureManagedIdentity {
/**
* The Resource ID of the Azure Databricks Access Connector resource, of the form `/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg-name/providers/Microsoft.Databricks/accessConnectors/connector-name`.
*/
accessConnectorId: string;
/**
* Unique ID of the credential.
*/
credentialId: string;
/**
* The Resource ID of the Azure User Assigned Managed Identity associated with Azure Databricks Access Connector, of the form `/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg-name/providers/Microsoft.ManagedIdentity/userAssignedIdentities/user-managed-identity-name`.
*
* `azureServicePrincipal` optional configuration block to use service principal as credential details for Azure. Only applicable when purpose is `STORAGE` (Legacy):
*/
managedIdentityId?: string;
}
export interface CredentialAzureServicePrincipal {
/**
* The application ID of the application registration within the referenced AAD tenant
*/
applicationId: string;
/**
* The client secret generated for the above app ID in AAD. **This field is redacted on output**
*
* `databricksGcpServiceAccount` optional configuration block for creating a Databricks-managed GCP Service Account:
*/
clientSecret: string;
/**
* The directory ID corresponding to the Azure Active Directory (AAD) tenant of the application
*/
directoryId: string;
}
export interface CredentialDatabricksGcpServiceAccount {
/**
* Unique ID of the credential.
*/
credentialId: string;
/**
* The email of the GCP service account created, to be granted access to relevant buckets.
*/
email: string;
privateKeyId: string;
}
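/*
 * Example: the `CredentialAwsIamRole`, `CredentialAzureManagedIdentity`, and related blocks above
 * are configuration blocks of the `databricks.Credential` resource. A hedged sketch using the AWS
 * IAM role variant; the argument shape is assumed from these interfaces and the role ARN is a
 * placeholder.
 *
 * ```typescript
 * import * as pulumi from "@pulumi/pulumi";
 * import * as databricks from "@pulumi/databricks";
 *
 * const serviceCredential = new databricks.Credential("external_service", {
 *     name: "external-service",
 *     purpose: "SERVICE",
 *     awsIamRole: {
 *         roleArn: "arn:aws:iam::123456789012:role/my-service-role", // placeholder ARN
 *     },
 *     comment: "Managed by Pulumi",
 * });
 * ```
 */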
export interface CustomAppIntegrationTokenAccessPolicy {
/**
* access token time to live (TTL) in minutes.
*/
accessTokenTtlInMinutes?: number;
/**
 * refresh token TTL in minutes. The TTL of the refresh token cannot be lower than the TTL of the access token.
*/
refreshTokenTtlInMinutes?: number;
}
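/*
 * Example: `CustomAppIntegrationTokenAccessPolicy` is set via the `tokenAccessPolicy` argument of
 * the `databricks.CustomAppIntegration` resource. A hedged sketch, assuming `name`, `redirectUrls`,
 * `scopes`, and `confidential` arguments; the redirect URL is a placeholder.
 *
 * ```typescript
 * import * as pulumi from "@pulumi/pulumi";
 * import * as databricks from "@pulumi/databricks";
 *
 * const internalTool = new databricks.CustomAppIntegration("internal_tool", {
 *     name: "internal-tool",
 *     redirectUrls: ["https://tool.example.com/oauth/callback"], // placeholder redirect URL
 *     scopes: ["all-apis"],
 *     confidential: true,
 *     tokenAccessPolicy: {
 *         accessTokenTtlInMinutes: 60,
 *         refreshTokenTtlInMinutes: 1440,
 *     },
 * });
 * ```
 */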
export interface DefaultNamespaceSettingNamespace {
/**
* The value for the setting.
*/
value?: string;
}
export interface DisableLegacyAccessSettingDisableLegacyAccess {
value: boolean;
}
export interface DisableLegacyDbfsSettingDisableLegacyDbfs {
/**
* The boolean value for the setting.
*/
value: boolean;
}
export interface EnhancedSecurityMonitoringWorkspaceSettingEnhancedSecurityMonitoringWorkspace {
isEnabled: boolean;
}
export interface ExternalLocationEncryptionDetails {
sseEncryptionDetails?: outputs.ExternalLocationEncryptionDetailsSseEncryptionDetails;
}
export interface ExternalLocationEncryptionDetailsSseEncryptionDetails {
algorithm?: string;
awsKmsKeyArn?: string;
}
export interface GetAppApp {
activeDeployment: outputs.GetAppAppActiveDeployment;
/**
* attribute
*/
appStatus: outputs.GetAppAppAppStatus;
/**
* The Budget Policy ID set for this resource.
*/
budgetPolicyId?: string;
/**
* attribute
*/
computeStatus: outputs.GetAppAppComputeStatus;
/**
* The creation time of the app.
*/
createTime: string;
/**
* The email of the user that created the app.
*/
creator: string;
/**
 * The default workspace file system path of the source code from which app deployments are created. This field tracks the workspace source code path of the last active deployment.
*/
defaultSourceCodePath: string;
/**
* The description of the resource.
*/
description?: string;
/**
* The effective budget policy ID.
*/
effectiveBudgetPolicyId: string;
/**
 * A list of effective API scopes granted to the user access token.
*/
effectiveUserApiScopes: string[];
/**
 * The unique identifier of the app.
*/
id: string;
/**
* The name of the app.
*/
name: string;
oauth2AppClientId: string;
oauth2AppIntegrationId: string;
pendingDeployment: outputs.GetAppAppPendingDeployment;
/**
 * A list of resources that the app has access to.
*/
resources?: outputs.GetAppAppResource[];
servicePrincipalClientId: string;
/**
* id of the app service principal
*/
servicePrincipalId: number;
/**
* name of the app service principal
*/
servicePrincipalName: string;
/**
* The update time of the app.
*/
updateTime: string;
/**
* The email of the user that last updated the app.
*/
updater: string;
/**
* The URL of the app once it is deployed.
*/
url: string;
userApiScopes?: string[];
}
export interface GetAppAppActiveDeployment {
/**
* The creation time of the app.
*/
createTime: string;
/**
* The email of the user that created the app.
*/
creator: string;
deploymentArtifacts: outputs.GetAppAppActiveDeploymentDeploymentArtifacts;
deploymentId?: string;
mode?: string;
sourceCodePath?: string;
status: outputs.GetAppAppActiveDeploymentStatus;
/**
* The update time of the app.
*/
updateTime: string;
}
export interface GetAppAppActiveDeploymentDeploymentArtifacts {
sourceCodePath?: string;
}
export interface GetAppAppActiveDeploymentStatus {
/**
* Application status message
*/
message: string;
/**
* State of the application.
*/
state: string;
}
export interface GetAppAppAppStatus {
/**
* Application status message
*/
message: string;
/**
* State of the application.
*/
state: string;
}
export interface GetAppAppComputeStatus {
/**
* Application status message
*/
message: string;
/**
* State of the application.
*/
state: string;
}
export interface GetAppAppPendingDeployment {
/**
* The creation time of the app.
*/
createTime: string;
/**
* The email of the user that created the app.
*/
creator: string;
deploymentArtifacts: outputs.GetAppAppPendingDeploymentDeploymentArtifacts;
deploymentId?: string;
mode?: string;
sourceCodePath?: string;
status: outputs.GetAppAppPendingDeploymentStatus;
/**
* The update time of the app.
*/
updateTime: string;
}
export interface GetAppAppPendingDeploymentDeploymentArtifacts {
sourceCodePath?: string;
}
export interface GetAppAppPendingDeploymentStatus {
/**
* Application status message
*/
message: string;
/**
* State of the application.
*/
state: string;
}
export interface GetAppAppResource {
/**
* The description of the resource.
*/
description?: string;
/**
* attribute
*/
job?: outputs.GetAppAppResourceJob;
/**
 * The name of the resource.
*/
name: string;
/**
* attribute
*/
secret?: outputs.GetAppAppResourceSecret;
/**
* attribute
*/
servingEndpoint?: outputs.GetAppAppResourceServingEndpoint;
/**
* attribute
*/
sqlWarehouse?: outputs.GetAppAppResourceSqlWarehouse;
}
export interface GetAppAppResourceJob {
/**
* Id of the job to grant permission on.
*/
id: string;
/**
* Permissions to grant on the Job. Supported permissions are: `CAN_MANAGE`, `IS_OWNER`, `CAN_MANAGE_RUN`, `CAN_VIEW`.
*/
permission: string;
}
export interface GetAppAppResourceSecret {
/**
* Key of the secret to grant permission on.
*/
key: string;
/**
 * Permission to grant on the secret scope. For secrets, only one permission is allowed. Permission must be one of: `READ`, `WRITE`, `MANAGE`.
*/
permission: string;
/**
* Scope of the secret to grant permission on.
*/
scope: string;
}
export interface GetAppAppResourceServingEndpoint {
/**
 * Name of the serving endpoint to grant permission on.
*/
name: string;
/**
 * Permission to grant on the serving endpoint. Supported permissions are: `CAN_MANAGE`, `CAN_QUERY`, `CAN_VIEW`.
*/
permission: string;
}
export interface GetAppAppResourceSqlWarehouse {
/**
 * Id of the SQL warehouse to grant permission on.
*/
id: string;
/**
 * Permission to grant on the SQL warehouse. Supported permissions are: `CAN_MANAGE`, `CAN_USE`, `IS_OWNER`.
*/
permission: string;
}
export interface GetAppsApp {
activeDeployment: outputs.GetAppsAppActiveDeployment;
/**
* attribute
*/
appStatus: outputs.GetAppsAppAppStatus;
/**
* The Budget Policy ID set for this resource.
*/
budgetPolicyId?: string;
/**
* attribute
*/
computeStatus: outputs.GetAppsAppComputeStatus;
/**
* The creation time of the app.
*/
createTime: string;
/**
* The email of the user that created the app.
*/
creator: string;
/**
 * The default workspace file system path of the source code from which app deployments are created. This field tracks the workspace source code path of the last active deployment.
*/
defaultSourceCodePath: string;
/**
* The description of the resource.
*/
description?: string;
/**
* The effective budget policy ID.
*/
effectiveBudgetPolicyId: string;
/**
 * A list of effective API scopes granted to the user access token.
*/
effectiveUserApiScopes: string[];
/**
 * The unique identifier of the app.
*/
id: string;
/**
 * The name of the app.
*/
name: string;
oauth2AppClientId: string;
oauth2AppIntegrationId: string;
pendingDeployment: outputs.GetAppsAppPendingDeployment;
/**
 * A list of resources that the app has access to.
*/
resources?: outputs.GetAppsAppResource[];
servicePrincipalClientId: string;
/**
* id of the app service principal
*/
servicePrincipalId: number;
/**
* name of the app service principal
*/
servicePrincipalName: string;
/**
* The update time of the app.
*/
updateTime: string;
/**
* The email of the user that last updated the app.
*/
updater: string;
/**
* The URL of the app once it is deployed.
*/
url: string;
userApiScopes?: string[];
}
export interface GetAppsAppActiveDeployment {
/**
* The creation time of the app.
*/
createTime: string;
/**
* The email of the user that created the app.
*/
creator: string;
deploymentArtifacts: outputs.GetAppsAppActiveDeploymentDeploymentArtifacts;
deploymentId?: string;
mode?: string;
sourceCodePath?: string;
status: outputs.GetAppsAppActiveDeploymentStatus;
/**
* The update time of the app.
*/
updateTime: string;
}
export interface GetAppsAppActiveDeploymentDeploymentArtifacts {
sourceCodePath?: string;
}
export interface GetAppsAppActiveDeploymentStatus {
/**
* Application status message
*/
message: string;
/**
* State of the application.
*/
state: string;
}
export interface GetAppsAppAppStatus {
/**
* Application status message
*/
message: string;
/**
* State of the application.
*/
state: string;
}
export interface GetAppsAppComputeStatus {
/**
* Application status message
*/
message: string;
/**
* State of the application.
*/
state: string;
}
export interface GetAppsAppPendingDeployment {
/**
* The creation time of the app.
*/
createTime: string;
/**
* The email of the user that created the app.
*/
creator: string;
deploymentArtifacts: outputs.GetAppsAppPendingDeploymentDeploymentArtifacts;
deploymentId?: string;
mode?: string;
sourceCodePath?: string;
status: outputs.GetAppsAppPendingDeploymentStatus;
/**
* The update time of the app.
*/
updateTime: string;
}
export interface GetAppsAppPendingDeploymentDeploymentArtifacts {
sourceCodePath?: string;
}
export interface GetAppsAppPendingDeploymentStatus {
/**
* Application status message
*/
message: string;
/**
* State of the application.
*/
state: string;
}
export interface GetAppsAppResource {
/**
* The description of the resource.
*/
description?: string;
/**
* attribute
*/
job?: outputs.GetAppsAppResourceJob;
/**
 * The name of the resource.
*/
name: string;
/**
* attribute
*/
secret?: outputs.GetAppsAppResourceSecret;
/**
* attribute
*/
servingEndpoint?: outputs.GetAppsAppResourceServingEndpoint;
/**
* attribute
*/
sqlWarehouse?: outputs.GetAppsAppResourceSqlWarehouse;
}
export interface GetAppsAppResourceJob {
/**
* Id of the job to grant permission on.
*/
id: string;
/**
* Permissions to grant on the Job. Supported permissions are: `CAN_MANAGE`, `IS_OWNER`, `CAN_MANAGE_RUN`, `CAN_VIEW`.
*/
permission: string;
}
export interface GetAppsAppResourceSecret {
/**
* Key of the secret to grant permission on.
*/
key: string;
/**
 * Permission to grant on the secret scope. For secrets, only one permission is allowed. Permission must be one of: `READ`, `WRITE`, `MANAGE`.
*/
permission: string;
/**
* Scope of the secret to grant permission on.
*/
scope: string;
}
export interface GetAppsAppResourceServingEndpoint {
/**
* Name of the serving endpoint to grant permission on.
*/
name: string;
/**
 * Permission to grant on the serving endpoint. Supported permissions are: `CAN_MANAGE`, `CAN_QUERY`, `CAN_VIEW`.
*/
permission: string;
}
export interface GetAppsAppResourceSqlWarehouse {
/**
 * Id of the SQL warehouse to grant permission on.
*/
id: string;
/**
 * Permission to grant on the SQL warehouse. Supported permissions are: `CAN_MANAGE`, `CAN_USE`, `IS_OWNER`.
*/
permission: string;
}
export interface GetBudgetPoliciesBudgetPolicy {
bindingWorkspaceIds?: number[];
customTags?: outputs.GetBudgetPoliciesBudgetPolicyCustomTag[];
policyId: string;
/**
* The partial name of policies to be filtered on. If unspecified, all policies will be returned.
*/
policyName?: string;
}
export interface GetBudgetPoliciesBudgetPolicyCustomTag {
key: string;
value?: string;
}
export interface GetBudgetPolicyCustomTag {
key: string;
value?: string;
}
export interface GetCatalogCatalogInfo {
browseOnly?: boolean;
/**
 * Type of the catalog, e.g. `MANAGED_CATALOG`, `DELTASHARING_CATALOG`, `SYSTEM_CATALOG`.
*/
catalogType?: string;
/**
* Free-form text description
*/
comment?: string;
/**
* The name of the connection to an external data source.
*/
connectionName?: string;
/**
* Time at which this catalog was created, in epoch milliseconds.
*/
createdAt?: number;
/**
* Username of catalog creator.
*/
createdBy?: string;
/**
* object describing applied predictive optimization flag.
*/
effectivePredictiveOptimizationFlag?: outputs.GetCatalogCatalogInfoEffectivePredictiveOptimizationFlag;
/**
* Whether predictive optimization should be enabled for this object and objects under it.
*/
enablePredictiveOptimization?: string;
/**
* The full name of the catalog. Corresponds with the name field.
*/
fullName?: string;
/**
* Whether the current securable is accessible from all workspaces or a specific set of workspaces.
*/
isolationMode?: string;
/**
* Unique identifier of parent metastore.
*/
metastoreId?: string;
/**
* name of the catalog
*/
name?: string;
/**
* A map of key-value properties attached to the securable.
*/
options?: {
[key: string]: string;
};
/**
* Current owner of the catalog
*/
owner?: string;
/**
* A map of key-value properties attached to the securable.
*/
properties?: {
[key: string]: string;
};
/**
* The name of delta sharing provider.
*/
providerName?: string;
provisioningInfo?: outputs.GetCatalogCatalogInfoProvisioningInfo;
/**
* Securable type.
*/
securableType?: string;
/**
* The name of the share under the share provider.
*/
shareName?: string;
/**
* Storage Location URL (full path) for managed tables within catalog.
*/
storageLocation?: string;
/**
* Storage root URL for managed tables within catalog.
*/
storageRoot?: string;
/**
* Time at which this catalog was last