
@pulumi/databricks


A Pulumi package for creating and managing Databricks cloud resources.

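A minimal usage sketch before the generated typings (adapted from the example embedded in the `ClusterAutoscale` docs below; the cluster name, worker counts, and the assumption that Databricks provider credentials are already configured are illustrative):

```typescript
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

// Look up the smallest node type with a local disk and the latest LTS Spark runtime.
const smallest = databricks.getNodeType({ localDisk: true });
const latestLts = databricks.getSparkVersion({ longTermSupport: true });

// Provision an autoscaling cluster; the `autoscale` block corresponds to the
// ClusterAutoscale type declared in this file.
const shared = new databricks.Cluster("shared-autoscaling", {
    clusterName: "Shared Autoscaling",
    sparkVersion: latestLts.then(v => v.id),
    nodeTypeId: smallest.then(t => t.id),
    autoterminationMinutes: 20,
    autoscale: {
        minWorkers: 1,
        maxWorkers: 8,
    },
});

// Export the cluster ID for use elsewhere in the stack.
export const clusterId = shared.id;
```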
import * as outputs from "../types/output"; export interface AccessControlRuleSetGrantRule { /** * a list of principals who are granted a role. The following format is supported: * * `users/{username}` (also exposed as `aclPrincipalId` attribute of `databricks.User` resource). * * `groups/{groupname}` (also exposed as `aclPrincipalId` attribute of `databricks.Group` resource). * * `servicePrincipals/{applicationId}` (also exposed as `aclPrincipalId` attribute of `databricks.ServicePrincipal` resource). */ principals?: string[]; /** * Role to be granted. The supported roles are listed below. For more information about these roles, refer to [service principal roles](https://docs.databricks.com/security/auth-authz/access-control/service-principal-acl.html#service-principal-roles), [group roles](https://docs.databricks.com/en/administration-guide/users-groups/groups.html#manage-roles-on-an-account-group-using-the-workspace-admin-settings-page), [marketplace roles](https://docs.databricks.com/en/marketplace/get-started-provider.html#assign-the-marketplace-admin-role) or [budget policy permissions](https://docs.databricks.com/aws/en/admin/usage/budget-policies#manage-budget-policy-permissions), depending on the `name` defined: * * `accounts/{account_id}/ruleSets/default` * * `roles/marketplace.admin` - Databricks Marketplace administrator. * * `roles/billing.admin` - Billing administrator. * * `roles/tagPolicy.creator` - Creator of tag policies. * * `roles/tagPolicy.manager` - Manager of tag policies. * * `roles/tagPolicy.assigner` - Assigner of tag policies. * * `accounts/{account_id}/servicePrincipals/{service_principal_application_id}/ruleSets/default` * * `roles/servicePrincipal.manager` - Manager of a service principal. * * `roles/servicePrincipal.user` - User of a service principal. * * `accounts/{account_id}/groups/{group_id}/ruleSets/default` * * `roles/group.manager` - Manager of a group. * * `accounts/{account_id}/budgetPolicies/{budget_policy_id}/ruleSets/default` * * `roles/budgetPolicy.manager` - Manager of a budget policy. * * `roles/budgetPolicy.user` - User of a budget policy. * * `accounts/{account_id}/tagPolicies/{tag_policy_id}/ruleSets/default` * * `roles/tagPolicy.manager` - Manager of a specific tag policy. * * `roles/tagPolicy.assigner` - Assigner of a specific tag policy. */ role: string; } export interface AccountFederationPolicyOidcPolicy { /** * The allowed token audiences, as specified in the 'aud' claim of federated tokens. * The audience identifier is intended to represent the recipient of the token. * Can be any non-empty string value. As long as the audience in the token matches * at least one audience in the policy, the token is considered a match. If audiences * is unspecified, defaults to your Databricks account id */ audiences?: string[]; /** * The required token issuer, as specified in the 'iss' claim of federated tokens */ issuer?: string; /** * The public keys used to validate the signature of federated tokens, in JWKS format. * Most use cases should not need to specify this field. If jwksUri and jwksJson * are both unspecified (recommended), Databricks automatically fetches the public * keys from your issuer’s well known endpoint. Databricks strongly recommends * relying on your issuer’s well known endpoint for discovering public keys */ jwksJson?: string; /** * URL of the public keys used to validate the signature of federated tokens, in * JWKS format. Most use cases should not need to specify this field. 
If jwksUri * and jwksJson are both unspecified (recommended), Databricks automatically * fetches the public keys from your issuer’s well known endpoint. Databricks * strongly recommends relying on your issuer’s well known endpoint for discovering * public keys */ jwksUri?: string; /** * The required token subject, as specified in the subject claim of federated tokens. * Must be specified for service principal federation policies. Must not be specified * for account federation policies */ subject?: string; /** * The claim that contains the subject of the token. If unspecified, the default value * is 'sub' */ subjectClaim?: string; } export interface AccountNetworkPolicyEgress { /** * The access policy enforced for egress traffic to the internet */ networkAccess?: outputs.AccountNetworkPolicyEgressNetworkAccess; } export interface AccountNetworkPolicyEgressNetworkAccess { /** * List of internet destinations that serverless workloads are allowed to access when in RESTRICTED_ACCESS mode */ allowedInternetDestinations?: outputs.AccountNetworkPolicyEgressNetworkAccessAllowedInternetDestination[]; /** * List of storage destinations that serverless workloads are allowed to access when in RESTRICTED_ACCESS mode */ allowedStorageDestinations?: outputs.AccountNetworkPolicyEgressNetworkAccessAllowedStorageDestination[]; /** * Optional. When policyEnforcement is not provided, we default to ENFORCE_MODE_ALL_SERVICES */ policyEnforcement?: outputs.AccountNetworkPolicyEgressNetworkAccessPolicyEnforcement; /** * The restriction mode that controls how serverless workloads can access the internet. Possible values are: `FULL_ACCESS`, `RESTRICTED_ACCESS` */ restrictionMode: string; } export interface AccountNetworkPolicyEgressNetworkAccessAllowedInternetDestination { /** * The internet destination to which access will be allowed. Format dependent on the destination type */ destination?: string; /** * The type of internet destination. Currently only DNS_NAME is supported. Possible values are: `DNS_NAME` */ internetDestinationType?: string; } export interface AccountNetworkPolicyEgressNetworkAccessAllowedStorageDestination { /** * The Azure storage account name */ azureStorageAccount?: string; /** * The Azure storage service type (blob, dfs, etc.) */ azureStorageService?: string; bucketName?: string; region?: string; /** * The type of storage destination. Possible values are: `AWS_S3`, `AZURE_STORAGE`, `GOOGLE_CLOUD_STORAGE` */ storageDestinationType?: string; } export interface AccountNetworkPolicyEgressNetworkAccessPolicyEnforcement { /** * When empty, it means dry run for all products. * When non-empty, it means dry run for specific products and for the other products, they will run in enforced mode */ dryRunModeProductFilters?: string[]; /** * The mode of policy enforcement. ENFORCED blocks traffic that violates policy, * while DRY_RUN only logs violations without blocking. When not specified, * defaults to ENFORCED. 
Possible values are: `DRY_RUN`, `ENFORCED` */ enforcementMode?: string; } export interface AccountSettingV2AibiDashboardEmbeddingAccessPolicy { /** * Possible values are: `ALLOW_ALL_DOMAINS`, `ALLOW_APPROVED_DOMAINS`, `DENY_ALL_DOMAINS` */ accessPolicyType: string; } export interface AccountSettingV2AibiDashboardEmbeddingApprovedDomains { approvedDomains?: string[]; } export interface AccountSettingV2AutomaticClusterUpdateWorkspace { canToggle?: boolean; enabled?: boolean; enablementDetails?: outputs.AccountSettingV2AutomaticClusterUpdateWorkspaceEnablementDetails; maintenanceWindow?: outputs.AccountSettingV2AutomaticClusterUpdateWorkspaceMaintenanceWindow; restartEvenIfNoUpdatesAvailable?: boolean; } export interface AccountSettingV2AutomaticClusterUpdateWorkspaceEnablementDetails { /** * The feature is force enabled if compliance mode is active */ forcedForComplianceMode?: boolean; /** * The feature is unavailable if the corresponding entitlement disabled (see getShieldEntitlementEnable) */ unavailableForDisabledEntitlement?: boolean; /** * The feature is unavailable if the customer doesn't have enterprise tier */ unavailableForNonEnterpriseTier?: boolean; } export interface AccountSettingV2AutomaticClusterUpdateWorkspaceMaintenanceWindow { weekDayBasedSchedule?: outputs.AccountSettingV2AutomaticClusterUpdateWorkspaceMaintenanceWindowWeekDayBasedSchedule; } export interface AccountSettingV2AutomaticClusterUpdateWorkspaceMaintenanceWindowWeekDayBasedSchedule { /** * Possible values are: `FRIDAY`, `MONDAY`, `SATURDAY`, `SUNDAY`, `THURSDAY`, `TUESDAY`, `WEDNESDAY` */ dayOfWeek?: string; /** * Possible values are: `EVERY_WEEK`, `FIRST_AND_THIRD_OF_MONTH`, `FIRST_OF_MONTH`, `FOURTH_OF_MONTH`, `SECOND_AND_FOURTH_OF_MONTH`, `SECOND_OF_MONTH`, `THIRD_OF_MONTH` */ frequency?: string; windowStartTime?: outputs.AccountSettingV2AutomaticClusterUpdateWorkspaceMaintenanceWindowWeekDayBasedScheduleWindowStartTime; } export interface AccountSettingV2AutomaticClusterUpdateWorkspaceMaintenanceWindowWeekDayBasedScheduleWindowStartTime { hours?: number; minutes?: number; } export interface AccountSettingV2BooleanVal { value?: boolean; } export interface AccountSettingV2EffectiveAibiDashboardEmbeddingAccessPolicy { /** * Possible values are: `ALLOW_ALL_DOMAINS`, `ALLOW_APPROVED_DOMAINS`, `DENY_ALL_DOMAINS` */ accessPolicyType: string; } export interface AccountSettingV2EffectiveAibiDashboardEmbeddingApprovedDomains { approvedDomains?: string[]; } export interface AccountSettingV2EffectiveAutomaticClusterUpdateWorkspace { canToggle?: boolean; enabled?: boolean; enablementDetails?: outputs.AccountSettingV2EffectiveAutomaticClusterUpdateWorkspaceEnablementDetails; maintenanceWindow?: outputs.AccountSettingV2EffectiveAutomaticClusterUpdateWorkspaceMaintenanceWindow; restartEvenIfNoUpdatesAvailable?: boolean; } export interface AccountSettingV2EffectiveAutomaticClusterUpdateWorkspaceEnablementDetails { /** * The feature is force enabled if compliance mode is active */ forcedForComplianceMode?: boolean; /** * The feature is unavailable if the corresponding entitlement disabled (see getShieldEntitlementEnable) */ unavailableForDisabledEntitlement?: boolean; /** * The feature is unavailable if the customer doesn't have enterprise tier */ unavailableForNonEnterpriseTier?: boolean; } export interface AccountSettingV2EffectiveAutomaticClusterUpdateWorkspaceMaintenanceWindow { weekDayBasedSchedule?: outputs.AccountSettingV2EffectiveAutomaticClusterUpdateWorkspaceMaintenanceWindowWeekDayBasedSchedule; } export interface 
AccountSettingV2EffectiveAutomaticClusterUpdateWorkspaceMaintenanceWindowWeekDayBasedSchedule { /** * Possible values are: `FRIDAY`, `MONDAY`, `SATURDAY`, `SUNDAY`, `THURSDAY`, `TUESDAY`, `WEDNESDAY` */ dayOfWeek?: string; /** * Possible values are: `EVERY_WEEK`, `FIRST_AND_THIRD_OF_MONTH`, `FIRST_OF_MONTH`, `FOURTH_OF_MONTH`, `SECOND_AND_FOURTH_OF_MONTH`, `SECOND_OF_MONTH`, `THIRD_OF_MONTH` */ frequency?: string; windowStartTime?: outputs.AccountSettingV2EffectiveAutomaticClusterUpdateWorkspaceMaintenanceWindowWeekDayBasedScheduleWindowStartTime; } export interface AccountSettingV2EffectiveAutomaticClusterUpdateWorkspaceMaintenanceWindowWeekDayBasedScheduleWindowStartTime { hours?: number; minutes?: number; } export interface AccountSettingV2EffectiveBooleanVal { value?: boolean; } export interface AccountSettingV2EffectiveIntegerVal { value?: number; } export interface AccountSettingV2EffectivePersonalCompute { value?: string; } export interface AccountSettingV2EffectiveRestrictWorkspaceAdmins { /** * Possible values are: `ALLOW_ALL`, `RESTRICT_TOKENS_AND_JOB_RUN_AS` */ status: string; } export interface AccountSettingV2EffectiveStringVal { value?: string; } export interface AccountSettingV2IntegerVal { value?: number; } export interface AccountSettingV2PersonalCompute { value?: string; } export interface AccountSettingV2RestrictWorkspaceAdmins { /** * Possible values are: `ALLOW_ALL`, `RESTRICT_TOKENS_AND_JOB_RUN_AS` */ status: string; } export interface AccountSettingV2StringVal { value?: string; } export interface AibiDashboardEmbeddingAccessPolicySettingAibiDashboardEmbeddingAccessPolicy { /** * Configured embedding policy. Possible values are `ALLOW_ALL_DOMAINS`, `ALLOW_APPROVED_DOMAINS`, `DENY_ALL_DOMAINS`. */ accessPolicyType: string; } export interface AibiDashboardEmbeddingApprovedDomainsSettingAibiDashboardEmbeddingApprovedDomains { /** * the list of approved domains. To allow all subdomains for a given domain, use a wildcard symbol (`*`) before the domain name, i.e., `*.databricks.com` will allow to embed into any site under the `databricks.com`. */ approvedDomains: string[]; } export interface AlertCondition { /** * Alert state if the result is empty (`UNKNOWN`, `OK`, `TRIGGERED`) */ emptyResultState?: string; /** * Operator used for comparison in alert evaluation. (Enum: `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `LESS_THAN`, `LESS_THAN_OR_EQUAL`, `EQUAL`, `NOT_EQUAL`, `IS_NULL`) */ op: string; /** * Name of the column from the query result to use for comparison in alert evaluation: */ operand: outputs.AlertConditionOperand; /** * Threshold value used for comparison in alert evaluation: */ threshold?: outputs.AlertConditionThreshold; } export interface AlertConditionOperand { /** * Block describing the column from the query result to use for comparison in alert evaluation: */ column: outputs.AlertConditionOperandColumn; } export interface AlertConditionOperandColumn { /** * Name of the column. */ name: string; } export interface AlertConditionThreshold { /** * actual value used in comparison (one of the attributes is required): */ value: outputs.AlertConditionThresholdValue; } export interface AlertConditionThresholdValue { /** * boolean value (`true` or `false`) to compare against boolean results. */ boolValue?: boolean; /** * double value to compare against integer and double results. */ doubleValue?: number; /** * string value to compare against string results. 
*/ stringValue?: string; } export interface AlertV2EffectiveRunAs { /** * Application ID of an active service principal. Setting this field requires the `servicePrincipal/user` role */ servicePrincipalName?: string; /** * The email of an active workspace user. Can only set this field to their own email */ userName?: string; } export interface AlertV2Evaluation { /** * Operator used for comparison in alert evaluation. Possible values are: `EQUAL`, `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `IS_NOT_NULL`, `IS_NULL`, `LESS_THAN`, `LESS_THAN_OR_EQUAL`, `NOT_EQUAL` */ comparisonOperator: string; /** * Alert state if the result is empty. Please avoid setting this field to `UNKNOWN` because the `UNKNOWN` state is planned to be deprecated. Possible values are: `ERROR`, `OK`, `TRIGGERED`, `UNKNOWN` */ emptyResultState?: string; /** * (string) - Timestamp of the last evaluation */ lastEvaluatedAt: string; /** * User or Notification Destination to notify when alert is triggered */ notification?: outputs.AlertV2EvaluationNotification; /** * Source column from result to use to evaluate alert */ source: outputs.AlertV2EvaluationSource; /** * (string) - Latest state of alert evaluation. Possible values are: `ERROR`, `OK`, `TRIGGERED`, `UNKNOWN` */ state: string; /** * Threshold to use for alert evaluation; can be a column or a value */ threshold?: outputs.AlertV2EvaluationThreshold; } export interface AlertV2EvaluationNotification { effectiveNotifyOnOk: boolean; effectiveRetriggerSeconds: number; /** * Whether to notify alert subscribers when the alert returns back to normal */ notifyOnOk?: boolean; /** * Number of seconds an alert waits after being triggered before it is allowed to send another notification. * If set to 0 or omitted, the alert will not send any further notifications after the first trigger. * Setting this value to 1 allows the alert to send a notification on every evaluation where the condition is met, effectively making it always retrigger for notification purposes */ retriggerSeconds?: number; subscriptions?: outputs.AlertV2EvaluationNotificationSubscription[]; } export interface AlertV2EvaluationNotificationSubscription { destinationId?: string; userEmail?: string; } export interface AlertV2EvaluationSource { /** * If not set, the behavior is equivalent to using `First row` in the UI. Possible values are: `AVG`, `COUNT`, `COUNT_DISTINCT`, `MAX`, `MEDIAN`, `MIN`, `STDDEV`, `SUM` */ aggregation?: string; display?: string; name: string; } export interface AlertV2EvaluationThreshold { column?: outputs.AlertV2EvaluationThresholdColumn; value?: outputs.AlertV2EvaluationThresholdValue; } export interface AlertV2EvaluationThresholdColumn { /** * If not set, the behavior is equivalent to using `First row` in the UI. Possible values are: `AVG`, `COUNT`, `COUNT_DISTINCT`, `MAX`, `MEDIAN`, `MIN`, `STDDEV`, `SUM` */ aggregation?: string; display?: string; name: string; } export interface AlertV2EvaluationThresholdValue { boolValue?: boolean; doubleValue?: number; stringValue?: string; } export interface AlertV2RunAs { /** * Application ID of an active service principal. Setting this field requires the `servicePrincipal/user` role */ servicePrincipalName?: string; /** * The email of an active workspace user. Can only set this field to their own email */ userName?: string; } export interface AlertV2Schedule { /** * Indicates whether this schedule is paused or not. 
Possible values are: `PAUSED`, `UNPAUSED` */ pauseStatus?: string; /** * A cron expression using quartz syntax that specifies the schedule for this pipeline. * Should use the quartz format described here: http://www.quartz-scheduler.org/documentation/quartz-2.1.7/tutorials/tutorial-lesson-06.html */ quartzCronSchedule: string; /** * A Java timezone id. The schedule will be resolved using this timezone. * This will be combined with the quartzCronSchedule to determine the schedule. * See https://docs.databricks.com/sql/language-manual/sql-ref-syntax-aux-conf-mgmt-set-timezone.html for details */ timezoneId: string; } export interface AppActiveDeployment { /** * The creation time of the app. */ createTime: string; /** * The email of the user that created the app. */ creator: string; deploymentArtifacts: outputs.AppActiveDeploymentDeploymentArtifacts; deploymentId?: string; mode?: string; sourceCodePath?: string; status: outputs.AppActiveDeploymentStatus; /** * The update time of the app. */ updateTime: string; } export interface AppActiveDeploymentDeploymentArtifacts { sourceCodePath?: string; } export interface AppActiveDeploymentStatus { /** * Application status message */ message: string; /** * State of the application. */ state: string; } export interface AppAppStatus { /** * Application status message */ message: string; /** * State of the application. */ state: string; } export interface AppComputeStatus { /** * Application status message */ message: string; /** * State of the application. */ state: string; } export interface AppPendingDeployment { /** * The creation time of the app. */ createTime: string; /** * The email of the user that created the app. */ creator: string; deploymentArtifacts: outputs.AppPendingDeploymentDeploymentArtifacts; deploymentId?: string; mode?: string; sourceCodePath?: string; status: outputs.AppPendingDeploymentStatus; /** * The update time of the app. */ updateTime: string; } export interface AppPendingDeploymentDeploymentArtifacts { sourceCodePath?: string; } export interface AppPendingDeploymentStatus { /** * Application status message */ message: string; /** * State of the application. */ state: string; } export interface AppProviderConfig { workspaceId: string; } export interface AppResource { /** * attribute */ database?: outputs.AppResourceDatabase; /** * The description of the resource. * * Exactly one of the following attributes must be provided: */ description?: string; /** * attribute */ genieSpace?: outputs.AppResourceGenieSpace; /** * attribute */ job?: outputs.AppResourceJob; /** * The name of the resource. */ name: string; /** * attribute */ secret?: outputs.AppResourceSecret; /** * attribute */ servingEndpoint?: outputs.AppResourceServingEndpoint; /** * attribute */ sqlWarehouse?: outputs.AppResourceSqlWarehouse; /** * attribute (see the [API docs](https://docs.databricks.com/api/workspace/apps/create#resources-uc_securable) for full list of supported UC objects) */ ucSecurable?: outputs.AppResourceUcSecurable; } export interface AppResourceDatabase { /** * The name of database. */ databaseName: string; /** * The name of database instance. */ instanceName: string; /** * Permission to grant on database. Supported permissions are: `CAN_CONNECT_AND_CREATE`. */ permission: string; } export interface AppResourceGenieSpace { /** * The name of Genie Space. */ name: string; permission: string; /** * The unique ID of Genie Space. */ spaceId: string; } export interface AppResourceJob { /** * Id of the job to grant permission on. 
*/ id: string; /** * Permissions to grant on the Job. Supported permissions are: `CAN_MANAGE`, `IS_OWNER`, `CAN_MANAGE_RUN`, `CAN_VIEW`. */ permission: string; } export interface AppResourceSecret { /** * Key of the secret to grant permission on. */ key: string; /** * Permission to grant on the secret scope. For secrets, only one permission is allowed. Permission must be one of: `READ`, `WRITE`, `MANAGE`. */ permission: string; /** * Scope of the secret to grant permission on. */ scope: string; } export interface AppResourceServingEndpoint { /** * Name of the serving endpoint to grant permission on. */ name: string; /** * Permission to grant on the serving endpoint. Supported permissions are: `CAN_MANAGE`, `CAN_QUERY`, `CAN_VIEW`. */ permission: string; } export interface AppResourceSqlWarehouse { /** * Id of the SQL warehouse to grant permission on. */ id: string; /** * Permission to grant on the SQL warehouse. Supported permissions are: `CAN_MANAGE`, `CAN_USE`, `IS_OWNER`. */ permission: string; } export interface AppResourceUcSecurable { /** * Permissions to grant on UC securable, i.e. `READ_VOLUME`, `WRITE_VOLUME`. */ permission: string; /** * the full name of UC securable, i.e. `my-catalog.my-schema.my-volume`. */ securableFullName: string; /** * the type of UC securable, i.e. `VOLUME`. */ securableType: string; } export interface AppsSettingsCustomTemplateManifest { /** * The description of the template */ description?: string; /** * The name of the template. It must contain only alphanumeric characters, hyphens, underscores, and whitespaces. * It must be unique within the workspace */ name: string; resourceSpecs?: outputs.AppsSettingsCustomTemplateManifestResourceSpec[]; /** * The manifest schema version, for now only 1 is allowed */ version: number; } export interface AppsSettingsCustomTemplateManifestResourceSpec { /** * The description of the template */ description?: string; jobSpec?: outputs.AppsSettingsCustomTemplateManifestResourceSpecJobSpec; /** * The name of the template. It must contain only alphanumeric characters, hyphens, underscores, and whitespaces. * It must be unique within the workspace */ name: string; secretSpec?: outputs.AppsSettingsCustomTemplateManifestResourceSpecSecretSpec; servingEndpointSpec?: outputs.AppsSettingsCustomTemplateManifestResourceSpecServingEndpointSpec; sqlWarehouseSpec?: outputs.AppsSettingsCustomTemplateManifestResourceSpecSqlWarehouseSpec; ucSecurableSpec?: outputs.AppsSettingsCustomTemplateManifestResourceSpecUcSecurableSpec; } export interface AppsSettingsCustomTemplateManifestResourceSpecJobSpec { permission: string; } export interface AppsSettingsCustomTemplateManifestResourceSpecSecretSpec { permission: string; } export interface AppsSettingsCustomTemplateManifestResourceSpecServingEndpointSpec { permission: string; } export interface AppsSettingsCustomTemplateManifestResourceSpecSqlWarehouseSpec { permission: string; } export interface AppsSettingsCustomTemplateManifestResourceSpecUcSecurableSpec { permission: string; /** * Possible values are: `TABLE`, `VOLUME` */ securableType: string; } export interface ArtifactAllowlistArtifactMatcher { /** * The artifact path or maven coordinate. */ artifact: string; /** * The pattern matching type of the artifact. Only `PREFIX_MATCH` is supported. 
*/ matchType: string; } export interface AutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspace { canToggle?: boolean; enabled: boolean; enablementDetails: outputs.AutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceEnablementDetails; maintenanceWindow?: outputs.AutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceMaintenanceWindow; restartEvenIfNoUpdatesAvailable?: boolean; } export interface AutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceEnablementDetails { forcedForComplianceMode?: boolean; unavailableForDisabledEntitlement?: boolean; unavailableForNonEnterpriseTier?: boolean; } export interface AutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceMaintenanceWindow { weekDayBasedSchedule?: outputs.AutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceMaintenanceWindowWeekDayBasedSchedule; } export interface AutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceMaintenanceWindowWeekDayBasedSchedule { dayOfWeek: string; frequency: string; windowStartTime?: outputs.AutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceMaintenanceWindowWeekDayBasedScheduleWindowStartTime; } export interface AutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceMaintenanceWindowWeekDayBasedScheduleWindowStartTime { hours: number; minutes: number; } export interface BudgetAlertConfiguration { /** * List of action configurations to take when the budget alert is triggered. Consists of the following fields: */ actionConfigurations?: outputs.BudgetAlertConfigurationActionConfiguration[]; alertConfigurationId: string; /** * The threshold for the budget alert to determine if it is in a triggered state. The number is evaluated based on `quantityType`. */ quantityThreshold?: string; /** * The way to calculate cost for this budget alert. This is what quantityThreshold is measured in. (Enum: `LIST_PRICE_DOLLARS_USD`) */ quantityType?: string; /** * The time window of usage data for the budget. (Enum: `MONTH`) */ timePeriod?: string; /** * The evaluation method to determine when this budget alert is in a triggered state. (Enum: `CUMULATIVE_SPENDING_EXCEEDED`) */ triggerType?: string; } export interface BudgetAlertConfigurationActionConfiguration { actionConfigurationId: string; /** * The type of action to take when the budget alert is triggered. (Enum: `EMAIL_NOTIFICATION`) */ actionType?: string; /** * The target of the action. For `EMAIL_NOTIFICATION`, this is the email address to send the notification to. */ target?: string; } export interface BudgetFilter { /** * List of tags to filter by. Consists of the following fields: */ tags?: outputs.BudgetFilterTag[]; /** * Filter by workspace ID (if empty, include all usage for this account). Consists of the following fields: */ workspaceId?: outputs.BudgetFilterWorkspaceId; } export interface BudgetFilterTag { /** * The key of the tag. */ key?: string; /** * Consists of the following fields: */ value?: outputs.BudgetFilterTagValue; } export interface BudgetFilterTagValue { /** * The operator to use for the filter. (Enum: `IN`) */ operator?: string; /** * The values to filter by. */ values?: string[]; } export interface BudgetFilterWorkspaceId { /** * The operator to use for the filter. (Enum: `IN`) */ operator?: string; /** * The values to filter by. */ values?: number[]; } export interface BudgetPolicyCustomTag { /** * The key of the tag. 
* - Must be unique among all custom tags of the same policy * - Cannot be “budget-policy-name”, “budget-policy-id” or "budget-policy-resolution-result" - * these tags are preserved */ key: string; /** * The value of the tag */ value?: string; } export interface CatalogEffectivePredictiveOptimizationFlag { inheritedFromName?: string; inheritedFromType?: string; value: string; } export interface CatalogProvisioningInfo { state?: string; } export interface ClusterAutoscale { /** * The maximum number of workers to which the cluster can scale up when overloaded. maxWorkers must be strictly greater than min_workers. * * To create a [single node cluster](https://docs.databricks.com/clusters/single-node.html), set `isSingleNode = true` and `kind = "CLASSIC_PREVIEW"` for the cluster. Single-node clusters are suitable for small, non-distributed workloads like single-node machine learning use-cases. * * ```typescript * import * as pulumi from "@pulumi/pulumi"; * import * as databricks from "@pulumi/databricks"; * * const smallest = databricks.getNodeType({ * localDisk: true, * }); * const latestLts = databricks.getSparkVersion({ * longTermSupport: true, * }); * const singleNode = new databricks.Cluster("single_node", { * clusterName: "Single Node", * sparkVersion: latestLts.then(latestLts => latestLts.id), * nodeTypeId: smallest.then(smallest => smallest.id), * autoterminationMinutes: 20, * isSingleNode: true, * kind: "CLASSIC_PREVIEW", * }); * ``` */ maxWorkers?: number; /** * The minimum number of workers to which the cluster can scale down when underutilized. It is also the initial number of workers the cluster will have after creation. */ minWorkers?: number; } export interface ClusterAwsAttributes { /** * Availability type used for all subsequent nodes past the `firstOnDemand` ones. Valid values are `SPOT`, `SPOT_WITH_FALLBACK` and `ON_DEMAND`. Note: If `firstOnDemand` is zero, this availability type will be used for the entire cluster. Backend default value is `SPOT_WITH_FALLBACK` and could change in the future */ availability?: string; /** * The number of volumes launched for each instance. You can choose up to 10 volumes. This feature is only enabled for supported node types. Legacy node types cannot specify custom EBS volumes. For node types with no instance store, at least one EBS volume needs to be specified; otherwise, cluster creation will fail. These EBS volumes will be mounted at /ebs0, /ebs1, and etc. Instance store volumes will be mounted at /local_disk0, /local_disk1, and etc. If EBS volumes are attached, Databricks will configure Spark to use only the EBS volumes for scratch storage because heterogeneously sized scratch devices can lead to inefficient disk utilization. If no EBS volumes are attached, Databricks will configure Spark to use instance store volumes. If EBS volumes are specified, then the Spark configuration spark.local.dir will be overridden. */ ebsVolumeCount?: number; /** * If using gp3 volumes, what IOPS to use for the disk. If this is not set, the maximum performance of a gp2 volume with the same volume size will be used. */ ebsVolumeIops?: number; /** * The size of each EBS volume (in GiB) launched for each instance. For general purpose SSD, this value must be within the range 100 - 4096. For throughput optimized HDD, this value must be within the range 500 - 4096. Custom EBS volumes cannot be specified for the legacy node types (memory-optimized and compute-optimized). */ ebsVolumeSize?: number; /** * If using gp3 volumes, what throughput to use for the disk. 
If this is not set, the maximum performance of a gp2 volume with the same volume size will be used. */ ebsVolumeThroughput?: number; /** * The type of EBS volumes that will be launched with this cluster. Valid values are `GENERAL_PURPOSE_SSD` or `THROUGHPUT_OPTIMIZED_HDD`. Use this option only if you're not picking *Delta Optimized `i3.*`* node types. */ ebsVolumeType?: string; /** * The first `firstOnDemand` nodes of the cluster will be placed on on-demand instances. If this value is greater than 0, the cluster driver node will be placed on an on-demand instance. If this value is greater than or equal to the current cluster size, all nodes will be placed on on-demand instances. If this value is less than the current cluster size, `firstOnDemand` nodes will be placed on on-demand instances, and the remainder will be placed on availability instances. This value does not affect cluster size and cannot be mutated over the lifetime of a cluster. If unspecified, the default value is 0. */ firstOnDemand?: number; /** * Nodes for this cluster will only be placed on AWS instances with this instance profile. Please see databricks.InstanceProfile resource documentation for extended examples on adding a valid instance profile using Pulumi. */ instanceProfileArn?: string; /** * The max price for AWS spot instances, as a percentage of the corresponding instance type’s on-demand price. For example, if this field is set to 50, and the cluster needs a new `i3.xlarge` spot instance, then the max price is half of the price of on-demand `i3.xlarge` instances. Similarly, if this field is set to 200, the max price is twice the price of on-demand `i3.xlarge` instances. If not specified, the default value is `100`. When spot instances are requested for this cluster, only spot instances whose max price percentage matches this field will be considered. For safety, we enforce this field to be no more than `10000`. */ spotBidPricePercent?: number; /** * Identifier for the availability zone/datacenter in which the cluster resides. This string will be of a form like `us-west-2a`. The provided availability zone must be in the same region as the Databricks deployment. For example, `us-west-2a` is not a valid zone ID if the Databricks deployment resides in the `us-east-1` region. Enable automatic availability zone selection ("Auto-AZ"), by setting the value `auto`. Databricks selects the AZ based on available IPs in the workspace subnets and retries in other availability zones if AWS returns insufficient capacity errors. */ zoneId?: string; } export interface ClusterAzureAttributes { /** * Availability type used for all subsequent nodes past the `firstOnDemand` ones. Valid values are `SPOT_AZURE`, `SPOT_WITH_FALLBACK_AZURE`, and `ON_DEMAND_AZURE`. Note: If `firstOnDemand` is zero, this availability type will be used for the entire cluster. */ availability?: string; /** * The first `firstOnDemand` nodes of the cluster will be placed on on-demand instances. If this value is greater than 0, the cluster driver node will be placed on an on-demand instance. If this value is greater than or equal to the current cluster size, all nodes will be placed on on-demand instances. If this value is less than the current cluster size, `firstOnDemand` nodes will be placed on on-demand instances, and the remainder will be placed on availability instances. This value does not affect cluster size and cannot be mutated over the lifetime of a cluster. 
*/ firstOnDemand?: number; logAnalyticsInfo?: outputs.ClusterAzureAttributesLogAnalyticsInfo; /** * The max bid price used for Azure spot instances. You can set this to greater than or equal to the current spot price. You can also set this to `-1`, which specifies that the instance cannot be evicted on the basis of price. The price for the instance will be the current price for spot instances or the price for a standard instance. */ spotBidMaxPrice?: number; } export interface ClusterAzureAttributesLogAnalyticsInfo { logAnalyticsPrimaryKey?: string; logAnalyticsWorkspaceId?: string; } export interface ClusterClusterLogConf { dbfs?: outputs.ClusterClusterLogConfDbfs; s3?: outputs.ClusterClusterLogConfS3; volumes?: outputs.ClusterClusterLogConfVolumes; } export interface ClusterClusterLogConfDbfs { /** * DBFS destination, e.g., `dbfs:/cluster-logs`. */ destination: string; } export interface ClusterClusterLogConfS3 { /** * Set canned access control list, e.g. `bucket-owner-full-control`. If `cannedAcl` is set, the cluster instance profile must have `s3:PutObjectAcl` permission on the destination bucket and prefix. The full list of possible canned ACLs can be found [here](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl). By default, only the object owner gets full control. If you are using a cross-account role for writing data, you may want to set `bucket-owner-full-control` to make bucket owners able to read the logs. */ cannedAcl?: string; /** * S3 destination, e.g., `s3://my-bucket/some-prefix` You must configure the cluster with an instance profile, and the instance profile must have write access to the destination. You cannot use AWS keys. */ destination: string; /** * Enable server-side encryption, false by default. */ enableEncryption?: boolean; /** * The encryption type, it could be `sse-s3` or `sse-kms`. It is used only when encryption is enabled, and the default type is `sse-s3`. */ encryptionType?: string; /** * S3 endpoint, e.g. <https://s3-us-west-2.amazonaws.com>. Either `region` or `endpoint` needs to be set. If both are set, the endpoint is used. */ endpoint?: string; /** * KMS key used if encryption is enabled and encryption type is set to `sse-kms`. */ kmsKey?: string; /** * S3 region, e.g. `us-west-2`. Either `region` or `endpoint` must be set. If both are set, the endpoint is used. */ region?: string; } export interface ClusterClusterLogConfVolumes { /** * Unity Catalog volumes destination, e.g., `/Volumes/catalog/schema/volume/cluster-logs`. */ destination: string; } export interface ClusterClusterMountInfo { /** * path inside the Spark container. 
* * For example, you can mount Azure Data Lake Storage container using the following code: * * ```typescript * import * as pulumi from "@pulumi/pulumi"; * import * as databricks from "@pulumi/databricks"; * * const storageAccount = "ewfw3ggwegwg"; * const storageContainer = "test"; * const withNfs = new databricks.Cluster("with_nfs", {clusterMountInfos: [{ * networkFilesystemInfo: { * serverAddress: `${storageAccount}.blob.core.windows.net`, * mountOptions: "sec=sys,vers=3,nolock,proto=tcp", * }, * remoteMountDirPath: `${storageAccount}/${storageContainer}`, * localMountDirPath: "/mnt/nfs-test", * }]}); * ``` */ localMountDirPath: string; /** * block specifying connection. It consists of: */ networkFilesystemInfo: outputs.ClusterClusterMountInfoNetworkFilesystemInfo; /** * string specifying path to mount on the remote service. */ remoteMountDirPath?: string; } export interface ClusterClusterMountInfoNetworkFilesystemInfo { /** * string that will be passed as options passed to the `mount` command. */ mountOptions?: string; /** * host name. */ serverAddress: string; } export interface ClusterDockerImage { /** * `basic_auth.username` and `basic_auth.password` for Docker repository. Docker registry credentials are encrypted when they are stored in Databricks internal storage and when they are passed to a registry upon fetching Docker images at cluster launch. For better security, these credentials should be stored in the secret scope and referred using secret path syntax: `{{secrets/scope/key}}`, otherwise other users of the workspace may access them via UI/API. * * Example usage with azurermContainerRegistry and docker_registry_image, that you can adapt to your specific use-case: * * ```typescript * import * as pulumi from "@pulumi/pulumi"; * import * as databricks from "@pulumi/databricks"; * import * as docker from "@pulumi/docker"; * * const _this = new docker.RegistryImage("this", { * build: [{}], * name: `${thisAzurermContainerRegistry.loginServer}/sample:latest`, * }); * const thisCluster = new databricks.Cluster("this", {dockerImage: { * url: _this.name, * basicAuth: { * username: thisAzurermContainerRegistry.adminUsername, * password: thisAzurermContainerRegistry.adminPassword, * }, * }}); * ``` */ basicAuth?: outputs.ClusterDockerImageBasicAuth; /** * URL for the Docker image */ url: string; } export interface ClusterDockerImageBasicAuth { password: string; username: string; } export interface ClusterGcpAttributes { /** * Availability type used for all nodes. Valid values are `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP` and `ON_DEMAND_GCP`, default: `ON_DEMAND_GCP`. */ availability?: string; /** * Boot disk size in GB */ bootDiskSize?: number; /** * The first `firstOnDemand` nodes of the cluster will be placed on on-demand instances. If this value is greater than 0, the cluster driver node will be placed on an on-demand instance. If this value is greater than or equal to the current cluster size, all nodes will be placed on on-demand instances. If this value is less than the current cluster size, `firstOnDemand` nodes will be placed on on-demand instances, and the remainder will be placed on availability instances. This value does not affect cluster size and cannot be mutated over the lifetime of a cluster. */ firstOnDemand?: number; /** * Google Service Account email address that the cluster uses to authenticate with Google Identity. This field is used for authentication with the GCS and BigQuery data sources. 
*/ googleServiceAccount?: string; /** * Number of local SSD disks (each is 375GB in size) that will be attached to each node of the cluster. */ localSsdCount?: number; /** * if we should use preemptible executors ([GCP documentation](https://cloud.google.com/compute/docs/instances/preemptible)). *Warning: this field is deprecated in favor of `availability`, and will be removed soon.* */ usePreemptibleExecutors?: boolean; /** * Identifier for the availability zone in which the cluster resides. This can be one of the following: * * `HA` (default): High availability, spread nodes across availability zones for a Databricks deployment region. * * `AUTO`: Databricks picks an availability zone to schedule the cluster on. * * name of a GCP availability zone: pick one of the available zones from the [list of available availability zones](https://cloud.google.com/compute/docs/regions-zones#available). */ zoneId?: string; } export interface ClusterInitScript { abfss?: outputs.ClusterInitScriptAbfss; /** * @deprecated For init scripts use 'volumes', 'workspace' or cloud storage location instead of 'dbfs'. */ dbfs?: outputs.ClusterInitScriptDbfs; file?: outputs.ClusterInitScriptFile; gcs?: outputs.ClusterInitScriptGcs; s3?: outputs.ClusterInitScriptS3; volumes?: outputs.ClusterInitScriptVolumes; workspace?: outputs.ClusterInitScriptWorkspace; } export interface ClusterInitScriptAbfss { /** * abfss destination, e.g., `abfss://<container>@<storage-account>.dfs.core.windows.net/<path>`. */ destination: string; } export interface ClusterInitScriptDbfs { /** * DBFS destination, e.g., `dbfs:/my-init-scripts/init.sh` (deprecated; prefer `workspace`, `volumes`, or cloud storage). */ destination: string; } export interface ClusterInitScriptFile { /** * Local file destination, e.g., `file:/my/local/file.sh`. */ destination: string; } export interface ClusterInitScriptGcs { /** * GCS destination, e.g., `gs://my-bucket/my-init-script.sh`. */ destination: string; } export interface ClusterInitScriptS3 { /** * Set canned access control list, e.g. `bucket-owner-full-control`. If `cannedAcl` is set, the cluster instance profile must have `s3:PutObjectAcl` permission on the destination bucket and prefix. The full list of possible canned ACLs can be found [here](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl). By default, only the object owner gets full control. If you are using a cross-account role for writing data, you may want to set `bucket-owner-full-control` to make bucket owners able to read the logs. */ cannedAcl?: string; /** * S3 destination, e.g., `s3://my-bucket/some-prefix` You must configure the cluster with an instance profile, and the instance profile must have write access to the destination. You cannot use AWS keys. */ destination: string; /** * Enable server-side encryption, false by default. */ enableEncryption?: boolean; /** * The encryption type, it could be `sse-s3` or `sse-kms`. It is used only when encryption is enabled, and the default type is `sse-s3`. 
*/ encryptionType?: string; /** * S3 endpoint, e.g. <https://s3-us-west-2.amazonaws.com>. Either `region` or `endpoint` needs to be set. If both are set, the endpoint is used. */ endpoint?: string; /** * KMS key used if encryption is enabled and encryption type is set to `sse-kms`. */ kmsKey?: string; /** * S3 region, e.g. `us-west-2`. Either `region` or `endpoint` must be set. If both are set, the endpoint is used. */ region?: string; } export interface ClusterInitScriptVolumes { /** * Unity Catalog volumes destination, e.g., `/Volumes/catalog/schema/volume/init.sh`. */ destination: string; } export interface ClusterInitScriptWorkspace { /** * Workspace file destination, e.g., `/Users/user@domain.com/install-elk.sh`. */ destination: string; } export interface ClusterLibrary { cran?: outputs.ClusterLibraryCran; /** * @deprecated The `egg` library type is deprecated. Please use `whl` or `pypi` instead. */ egg?: string; jar?: string; maven?: outputs.ClusterLibraryMaven; pypi?: outputs.ClusterLibraryPypi; requirements?: string; whl?: string; } export interface ClusterLibraryCran { package: string; repo?: string; } export interface ClusterLibraryMaven { coordinates: string; exclusions?: string[]; repo?: string; } export interface ClusterLibraryPypi { pack