UNPKG

@pulumi/databricks

Version:

A Pulumi package for creating and managing Databricks cloud resources.

1,318 lines 611 kB
import * as outputs from "../types/output"; export interface AccessControlRuleSetGrantRule { /** * a list of principals who are granted a role. The following format is supported: * * `users/{username}` (also exposed as `aclPrincipalId` attribute of `databricks.User` resource). * * `groups/{groupname}` (also exposed as `aclPrincipalId` attribute of `databricks.Group` resource). * * `servicePrincipals/{applicationId}` (also exposed as `aclPrincipalId` attribute of `databricks.ServicePrincipal` resource). */ principals?: string[]; /** * Role to be granted. The supported roles are listed below. For more information about these roles, refer to [service principal roles](https://docs.databricks.com/security/auth-authz/access-control/service-principal-acl.html#service-principal-roles), [group roles](https://docs.databricks.com/en/administration-guide/users-groups/groups.html#manage-roles-on-an-account-group-using-the-workspace-admin-settings-page), [marketplace roles](https://docs.databricks.com/en/marketplace/get-started-provider.html#assign-the-marketplace-admin-role) or [budget policy permissions](https://docs.databricks.com/aws/en/admin/usage/budget-policies#manage-budget-policy-permissions), depending on the `name` defined: * * `accounts/{account_id}/ruleSets/default` * * `roles/marketplace.admin` - Databricks Marketplace administrator. * * `roles/billing.admin` - Billing administrator. * * `accounts/{account_id}/servicePrincipals/{service_principal_application_id}/ruleSets/default` * * `roles/servicePrincipal.manager` - Manager of a service principal. * * `roles/servicePrincipal.user` - User of a service principal. * * `accounts/{account_id}/groups/{group_id}/ruleSets/default` * * `roles/group.manager` - Manager of a group. * * `accounts/{account_id}/budgetPolicies/{budget_policy_id}/ruleSets/default` * * `roles/budgetPolicy.manager` - Manager of a budget policy. * * `roles/budgetPolicy.user` - User of a budget policy. 
*/ role: string; } export interface AccountFederationPolicyOidcPolicy { /** * The allowed token audiences, as specified in the 'aud' claim of federated tokens. * The audience identifier is intended to represent the recipient of the token. * Can be any non-empty string value. As long as the audience in the token matches * at least one audience in the policy, the token is considered a match. If audiences * is unspecified, defaults to your Databricks account id */ audiences?: string[]; /** * The required token issuer, as specified in the 'iss' claim of federated tokens */ issuer?: string; /** * The public keys used to validate the signature of federated tokens, in JWKS format. * Most use cases should not need to specify this field. If jwksUri and jwksJson * are both unspecified (recommended), Databricks automatically fetches the public * keys from your issuer’s well known endpoint. Databricks strongly recommends * relying on your issuer’s well known endpoint for discovering public keys */ jwksJson?: string; /** * URL of the public keys used to validate the signature of federated tokens, in * JWKS format. Most use cases should not need to specify this field. If jwksUri * and jwksJson are both unspecified (recommended), Databricks automatically * fetches the public keys from your issuer’s well known endpoint. Databricks * strongly recommends relying on your issuer’s well known endpoint for discovering * public keys */ jwksUri?: string; /** * The required token subject, as specified in the subject claim of federated tokens. * Must be specified for service principal federation policies. Must not be specified * for account federation policies */ subject?: string; /** * The claim that contains the subject of the token. 
If unspecified, the default value * is 'sub' */ subjectClaim?: string; } export interface AccountNetworkPolicyEgress { /** * The access policy enforced for egress traffic to the internet */ networkAccess?: outputs.AccountNetworkPolicyEgressNetworkAccess; } export interface AccountNetworkPolicyEgressNetworkAccess { /** * List of internet destinations that serverless workloads are allowed to access when in RESTRICTED_ACCESS mode */ allowedInternetDestinations?: outputs.AccountNetworkPolicyEgressNetworkAccessAllowedInternetDestination[]; /** * List of storage destinations that serverless workloads are allowed to access when in RESTRICTED_ACCESS mode */ allowedStorageDestinations?: outputs.AccountNetworkPolicyEgressNetworkAccessAllowedStorageDestination[]; /** * Optional. When policyEnforcement is not provided, we default to ENFORCE_MODE_ALL_SERVICES */ policyEnforcement?: outputs.AccountNetworkPolicyEgressNetworkAccessPolicyEnforcement; /** * The restriction mode that controls how serverless workloads can access the internet. Possible values are: `FULL_ACCESS`, `RESTRICTED_ACCESS` */ restrictionMode: string; } export interface AccountNetworkPolicyEgressNetworkAccessAllowedInternetDestination { /** * The internet destination to which access will be allowed. Format dependent on the destination type */ destination?: string; /** * The type of internet destination. Currently only DNS_NAME is supported. Possible values are: `DNS_NAME` */ internetDestinationType?: string; } export interface AccountNetworkPolicyEgressNetworkAccessAllowedStorageDestination { /** * The Azure storage account name */ azureStorageAccount?: string; /** * The Azure storage service type (blob, dfs, etc.) */ azureStorageService?: string; bucketName?: string; region?: string; /** * The type of storage destination. 
Possible values are: `AWS_S3`, `AZURE_STORAGE`, `GOOGLE_CLOUD_STORAGE` */ storageDestinationType?: string; } export interface AccountNetworkPolicyEgressNetworkAccessPolicyEnforcement { /** * When empty, it means dry run for all products. * When non-empty, it means dry run for specific products and for the other products, they will run in enforced mode */ dryRunModeProductFilters?: string[]; /** * The mode of policy enforcement. ENFORCED blocks traffic that violates policy, * while DRY_RUN only logs violations without blocking. When not specified, * defaults to ENFORCED. Possible values are: `DRY_RUN`, `ENFORCED` */ enforcementMode?: string; } export interface AccountSettingV2AibiDashboardEmbeddingAccessPolicy { /** * . Possible values are: `ALLOW_ALL_DOMAINS`, `ALLOW_APPROVED_DOMAINS`, `DENY_ALL_DOMAINS` */ accessPolicyType: string; } export interface AccountSettingV2AibiDashboardEmbeddingApprovedDomains { approvedDomains?: string[]; } export interface AccountSettingV2AutomaticClusterUpdateWorkspace { canToggle?: boolean; enabled?: boolean; enablementDetails?: outputs.AccountSettingV2AutomaticClusterUpdateWorkspaceEnablementDetails; maintenanceWindow?: outputs.AccountSettingV2AutomaticClusterUpdateWorkspaceMaintenanceWindow; restartEvenIfNoUpdatesAvailable?: boolean; } export interface AccountSettingV2AutomaticClusterUpdateWorkspaceEnablementDetails { /** * The feature is force enabled if compliance mode is active */ forcedForComplianceMode?: boolean; /** * The feature is unavailable if the corresponding entitlement disabled (see getShieldEntitlementEnable) */ unavailableForDisabledEntitlement?: boolean; /** * The feature is unavailable if the customer doesn't have enterprise tier */ unavailableForNonEnterpriseTier?: boolean; } export interface AccountSettingV2AutomaticClusterUpdateWorkspaceMaintenanceWindow { weekDayBasedSchedule?: outputs.AccountSettingV2AutomaticClusterUpdateWorkspaceMaintenanceWindowWeekDayBasedSchedule; } export interface 
AccountSettingV2AutomaticClusterUpdateWorkspaceMaintenanceWindowWeekDayBasedSchedule { /** * . Possible values are: `FRIDAY`, `MONDAY`, `SATURDAY`, `SUNDAY`, `THURSDAY`, `TUESDAY`, `WEDNESDAY` */ dayOfWeek?: string; /** * . Possible values are: `EVERY_WEEK`, `FIRST_AND_THIRD_OF_MONTH`, `FIRST_OF_MONTH`, `FOURTH_OF_MONTH`, `SECOND_AND_FOURTH_OF_MONTH`, `SECOND_OF_MONTH`, `THIRD_OF_MONTH` */ frequency?: string; windowStartTime?: outputs.AccountSettingV2AutomaticClusterUpdateWorkspaceMaintenanceWindowWeekDayBasedScheduleWindowStartTime; } export interface AccountSettingV2AutomaticClusterUpdateWorkspaceMaintenanceWindowWeekDayBasedScheduleWindowStartTime { hours?: number; minutes?: number; } export interface AccountSettingV2BooleanVal { value?: boolean; } export interface AccountSettingV2DefaultDataSecurityMode { status: string; } export interface AccountSettingV2EffectiveAibiDashboardEmbeddingAccessPolicy { /** * . Possible values are: `ALLOW_ALL_DOMAINS`, `ALLOW_APPROVED_DOMAINS`, `DENY_ALL_DOMAINS` */ accessPolicyType: string; } export interface AccountSettingV2EffectiveAibiDashboardEmbeddingApprovedDomains { approvedDomains?: string[]; } export interface AccountSettingV2EffectiveAutomaticClusterUpdateWorkspace { canToggle?: boolean; enabled?: boolean; enablementDetails?: outputs.AccountSettingV2EffectiveAutomaticClusterUpdateWorkspaceEnablementDetails; maintenanceWindow?: outputs.AccountSettingV2EffectiveAutomaticClusterUpdateWorkspaceMaintenanceWindow; restartEvenIfNoUpdatesAvailable?: boolean; } export interface AccountSettingV2EffectiveAutomaticClusterUpdateWorkspaceEnablementDetails { /** * The feature is force enabled if compliance mode is active */ forcedForComplianceMode?: boolean; /** * The feature is unavailable if the corresponding entitlement disabled (see getShieldEntitlementEnable) */ unavailableForDisabledEntitlement?: boolean; /** * The feature is unavailable if the customer doesn't have enterprise tier */ unavailableForNonEnterpriseTier?: boolean; } 
export interface AccountSettingV2EffectiveAutomaticClusterUpdateWorkspaceMaintenanceWindow { weekDayBasedSchedule?: outputs.AccountSettingV2EffectiveAutomaticClusterUpdateWorkspaceMaintenanceWindowWeekDayBasedSchedule; } export interface AccountSettingV2EffectiveAutomaticClusterUpdateWorkspaceMaintenanceWindowWeekDayBasedSchedule { /** * . Possible values are: `FRIDAY`, `MONDAY`, `SATURDAY`, `SUNDAY`, `THURSDAY`, `TUESDAY`, `WEDNESDAY` */ dayOfWeek?: string; /** * . Possible values are: `EVERY_WEEK`, `FIRST_AND_THIRD_OF_MONTH`, `FIRST_OF_MONTH`, `FOURTH_OF_MONTH`, `SECOND_AND_FOURTH_OF_MONTH`, `SECOND_OF_MONTH`, `THIRD_OF_MONTH` */ frequency?: string; windowStartTime?: outputs.AccountSettingV2EffectiveAutomaticClusterUpdateWorkspaceMaintenanceWindowWeekDayBasedScheduleWindowStartTime; } export interface AccountSettingV2EffectiveAutomaticClusterUpdateWorkspaceMaintenanceWindowWeekDayBasedScheduleWindowStartTime { hours?: number; minutes?: number; } export interface AccountSettingV2EffectiveBooleanVal { value?: boolean; } export interface AccountSettingV2EffectiveDefaultDataSecurityMode { status: string; } export interface AccountSettingV2EffectiveIntegerVal { value?: number; } export interface AccountSettingV2EffectivePersonalCompute { value?: string; } export interface AccountSettingV2EffectiveRestrictWorkspaceAdmins { status: string; } export interface AccountSettingV2EffectiveStringVal { value?: string; } export interface AccountSettingV2IntegerVal { value?: number; } export interface AccountSettingV2PersonalCompute { value?: string; } export interface AccountSettingV2RestrictWorkspaceAdmins { status: string; } export interface AccountSettingV2StringVal { value?: string; } export interface AibiDashboardEmbeddingAccessPolicySettingAibiDashboardEmbeddingAccessPolicy { /** * Configured embedding policy. Possible values are `ALLOW_ALL_DOMAINS`, `ALLOW_APPROVED_DOMAINS`, `DENY_ALL_DOMAINS`. 
*/ accessPolicyType: string; } export interface AibiDashboardEmbeddingApprovedDomainsSettingAibiDashboardEmbeddingApprovedDomains { /** * the list of approved domains. To allow all subdomains for a given domain, use a wildcard symbol (`*`) before the domain name, i.e., `*.databricks.com` will allow to embed into any site under the `databricks.com`. */ approvedDomains: string[]; } export interface AlertCondition { /** * Alert state if the result is empty (`UNKNOWN`, `OK`, `TRIGGERED`) */ emptyResultState?: string; /** * Operator used for comparison in alert evaluation. (Enum: `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `LESS_THAN`, `LESS_THAN_OR_EQUAL`, `EQUAL`, `NOT_EQUAL`, `IS_NULL`) */ op: string; /** * Name of the column from the query result to use for comparison in alert evaluation: */ operand: outputs.AlertConditionOperand; /** * Threshold value used for comparison in alert evaluation: */ threshold?: outputs.AlertConditionThreshold; } export interface AlertConditionOperand { /** * Block describing the column from the query result to use for comparison in alert evaluation: */ column: outputs.AlertConditionOperandColumn; } export interface AlertConditionOperandColumn { /** * Name of the column. */ name: string; } export interface AlertConditionThreshold { /** * actual value used in comparison (one of the attributes is required): */ value: outputs.AlertConditionThresholdValue; } export interface AlertConditionThresholdValue { /** * boolean value (`true` or `false`) to compare against boolean results. */ boolValue?: boolean; /** * double value to compare against integer and double results. */ doubleValue?: number; /** * string value to compare against string results. */ stringValue?: string; } export interface AlertV2EffectiveRunAs { /** * Application ID of an active service principal. Setting this field requires the `servicePrincipal/user` role */ servicePrincipalName?: string; /** * The email of an active workspace user. 
Can only set this field to their own email */ userName?: string; } export interface AlertV2Evaluation { /** * Operator used for comparison in alert evaluation. Possible values are: `EQUAL`, `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `IS_NOT_NULL`, `IS_NULL`, `LESS_THAN`, `LESS_THAN_OR_EQUAL`, `NOT_EQUAL` */ comparisonOperator?: string; /** * Alert state if result is empty. Possible values are: `ERROR`, `OK`, `TRIGGERED`, `UNKNOWN` */ emptyResultState?: string; /** * (string) - Timestamp of the last evaluation */ lastEvaluatedAt: string; /** * User or Notification Destination to notify when alert is triggered */ notification?: outputs.AlertV2EvaluationNotification; /** * Source column from result to use to evaluate alert */ source?: outputs.AlertV2EvaluationSource; /** * (string) - Latest state of alert evaluation. Possible values are: `ERROR`, `OK`, `TRIGGERED`, `UNKNOWN` */ state: string; /** * Threshold to user for alert evaluation, can be a column or a value */ threshold?: outputs.AlertV2EvaluationThreshold; } export interface AlertV2EvaluationNotification { /** * Whether to notify alert subscribers when alert returns back to normal */ notifyOnOk?: boolean; /** * Number of seconds an alert must wait after being triggered to rearm itself. After rearming, it can be triggered again. If 0 or not specified, the alert will not be triggered again */ retriggerSeconds?: number; subscriptions?: outputs.AlertV2EvaluationNotificationSubscription[]; } export interface AlertV2EvaluationNotificationSubscription { destinationId?: string; userEmail?: string; } export interface AlertV2EvaluationSource { /** * . Possible values are: `AVG`, `COUNT`, `COUNT_DISTINCT`, `MAX`, `MEDIAN`, `MIN`, `STDDEV`, `SUM` */ aggregation?: string; display?: string; name?: string; } export interface AlertV2EvaluationThreshold { column?: outputs.AlertV2EvaluationThresholdColumn; value?: outputs.AlertV2EvaluationThresholdValue; } export interface AlertV2EvaluationThresholdColumn { /** * . 
Possible values are: `AVG`, `COUNT`, `COUNT_DISTINCT`, `MAX`, `MEDIAN`, `MIN`, `STDDEV`, `SUM` */ aggregation?: string; display?: string; name?: string; } export interface AlertV2EvaluationThresholdValue { boolValue?: boolean; doubleValue?: number; stringValue?: string; } export interface AlertV2RunAs { /** * Application ID of an active service principal. Setting this field requires the `servicePrincipal/user` role */ servicePrincipalName?: string; /** * The email of an active workspace user. Can only set this field to their own email */ userName?: string; } export interface AlertV2Schedule { /** * Indicate whether this schedule is paused or not. Possible values are: `PAUSED`, `UNPAUSED` */ pauseStatus?: string; /** * A cron expression using quartz syntax that specifies the schedule for this pipeline. * Should use the quartz format described here: http://www.quartz-scheduler.org/documentation/quartz-2.1.7/tutorials/tutorial-lesson-06.html */ quartzCronSchedule?: string; /** * A Java timezone id. The schedule will be resolved using this timezone. * This will be combined with the quartzCronSchedule to determine the schedule. * See https://docs.databricks.com/sql/language-manual/sql-ref-syntax-aux-conf-mgmt-set-timezone.html for details */ timezoneId?: string; } export interface AppActiveDeployment { /** * The creation time of the app. */ createTime: string; /** * The email of the user that created the app. */ creator: string; deploymentArtifacts: outputs.AppActiveDeploymentDeploymentArtifacts; deploymentId?: string; mode?: string; sourceCodePath?: string; status: outputs.AppActiveDeploymentStatus; /** * The update time of the app. */ updateTime: string; } export interface AppActiveDeploymentDeploymentArtifacts { sourceCodePath?: string; } export interface AppActiveDeploymentStatus { /** * Application status message */ message: string; /** * State of the application. 
*/ state: string; } export interface AppAppStatus { /** * Application status message */ message: string; /** * State of the application. */ state: string; } export interface AppComputeStatus { /** * Application status message */ message: string; /** * State of the application. */ state: string; } export interface AppPendingDeployment { /** * The creation time of the app. */ createTime: string; /** * The email of the user that created the app. */ creator: string; deploymentArtifacts: outputs.AppPendingDeploymentDeploymentArtifacts; deploymentId?: string; mode?: string; sourceCodePath?: string; status: outputs.AppPendingDeploymentStatus; /** * The update time of the app. */ updateTime: string; } export interface AppPendingDeploymentDeploymentArtifacts { sourceCodePath?: string; } export interface AppPendingDeploymentStatus { /** * Application status message */ message: string; /** * State of the application. */ state: string; } export interface AppResource { /** * attribute */ database?: outputs.AppResourceDatabase; /** * The description of the resource. * * Exactly one of the following attributes must be provided: */ description?: string; /** * attribute */ job?: outputs.AppResourceJob; /** * The name of the resource. */ name: string; /** * attribute */ secret?: outputs.AppResourceSecret; /** * attribute */ servingEndpoint?: outputs.AppResourceServingEndpoint; /** * attribute */ sqlWarehouse?: outputs.AppResourceSqlWarehouse; /** * attribute (see the [API docs](https://docs.databricks.com/api/workspace/apps/create#resources-uc_securable) for full list of supported UC objects) */ ucSecurable?: outputs.AppResourceUcSecurable; } export interface AppResourceDatabase { /** * The name of database. */ databaseName: string; /** * The name of database instance. */ instanceName: string; /** * Permission to grant on database. Supported permissions are: `CAN_CONNECT_AND_CREATE`. 
*/ permission: string; } export interface AppResourceJob { /** * Id of the job to grant permission on. */ id: string; /** * Permissions to grant on the Job. Supported permissions are: `CAN_MANAGE`, `IS_OWNER`, `CAN_MANAGE_RUN`, `CAN_VIEW`. */ permission: string; } export interface AppResourceSecret { /** * Key of the secret to grant permission on. */ key: string; /** * Permission to grant on the secret scope. For secrets, only one permission is allowed. Permission must be one of: `READ`, `WRITE`, `MANAGE`. */ permission: string; /** * Scope of the secret to grant permission on. */ scope: string; } export interface AppResourceServingEndpoint { /** * Name of the serving endpoint to grant permission on. */ name: string; /** * Permission to grant on the serving endpoint. Supported permissions are: `CAN_MANAGE`, `CAN_QUERY`, `CAN_VIEW`. */ permission: string; } export interface AppResourceSqlWarehouse { /** * Id of the SQL warehouse to grant permission on. */ id: string; /** * Permission to grant on the SQL warehouse. Supported permissions are: `CAN_MANAGE`, `CAN_USE`, `IS_OWNER`. */ permission: string; } export interface AppResourceUcSecurable { /** * Permissions to grant on UC securable, i.e. `READ_VOLUME`, `WRITE_VOLUME`. */ permission: string; /** * the full name of UC securable, i.e. `my-catalog.my-schema.my-volume`. */ securableFullName: string; /** * the type of UC securable, i.e. `VOLUME`. */ securableType: string; } export interface AppsSettingsCustomTemplateManifest { /** * The description of the template */ description?: string; /** * The name of the template. It must contain only alphanumeric characters, hyphens, underscores, and whitespaces. 
* It must be unique within the workspace */ name: string; resourceSpecs?: outputs.AppsSettingsCustomTemplateManifestResourceSpec[]; /** * The manifest schema version, for now only 1 is allowed */ version: number; } export interface AppsSettingsCustomTemplateManifestResourceSpec { /** * The description of the template */ description?: string; jobSpec?: outputs.AppsSettingsCustomTemplateManifestResourceSpecJobSpec; /** * The name of the template. It must contain only alphanumeric characters, hyphens, underscores, and whitespaces. * It must be unique within the workspace */ name: string; secretSpec?: outputs.AppsSettingsCustomTemplateManifestResourceSpecSecretSpec; servingEndpointSpec?: outputs.AppsSettingsCustomTemplateManifestResourceSpecServingEndpointSpec; sqlWarehouseSpec?: outputs.AppsSettingsCustomTemplateManifestResourceSpecSqlWarehouseSpec; ucSecurableSpec?: outputs.AppsSettingsCustomTemplateManifestResourceSpecUcSecurableSpec; } export interface AppsSettingsCustomTemplateManifestResourceSpecJobSpec { permission: string; } export interface AppsSettingsCustomTemplateManifestResourceSpecSecretSpec { permission: string; } export interface AppsSettingsCustomTemplateManifestResourceSpecServingEndpointSpec { permission: string; } export interface AppsSettingsCustomTemplateManifestResourceSpecSqlWarehouseSpec { permission: string; } export interface AppsSettingsCustomTemplateManifestResourceSpecUcSecurableSpec { permission: string; /** * . Possible values are: `VOLUME` */ securableType: string; } export interface ArtifactAllowlistArtifactMatcher { /** * The artifact path or maven coordinate. */ artifact: string; /** * The pattern matching type of the artifact. Only `PREFIX_MATCH` is supported. 
*/ matchType: string; } export interface AutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspace { canToggle?: boolean; enabled: boolean; enablementDetails: outputs.AutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceEnablementDetails; maintenanceWindow?: outputs.AutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceMaintenanceWindow; restartEvenIfNoUpdatesAvailable?: boolean; } export interface AutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceEnablementDetails { forcedForComplianceMode?: boolean; unavailableForDisabledEntitlement?: boolean; unavailableForNonEnterpriseTier?: boolean; } export interface AutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceMaintenanceWindow { weekDayBasedSchedule?: outputs.AutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceMaintenanceWindowWeekDayBasedSchedule; } export interface AutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceMaintenanceWindowWeekDayBasedSchedule { dayOfWeek: string; frequency: string; windowStartTime?: outputs.AutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceMaintenanceWindowWeekDayBasedScheduleWindowStartTime; } export interface AutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceMaintenanceWindowWeekDayBasedScheduleWindowStartTime { hours: number; minutes: number; } export interface BudgetAlertConfiguration { /** * List of action configurations to take when the budget alert is triggered. Consists of the following fields: */ actionConfigurations?: outputs.BudgetAlertConfigurationActionConfiguration[]; alertConfigurationId: string; /** * The threshold for the budget alert to determine if it is in a triggered state. The number is evaluated based on `quantityType`. */ quantityThreshold?: string; /** * The way to calculate cost for this budget alert. This is what quantityThreshold is measured in. 
(Enum: `LIST_PRICE_DOLLARS_USD`) */ quantityType?: string; /** * The time window of usage data for the budget. (Enum: `MONTH`) */ timePeriod?: string; /** * The evaluation method to determine when this budget alert is in a triggered state. (Enum: `CUMULATIVE_SPENDING_EXCEEDED`) */ triggerType?: string; } export interface BudgetAlertConfigurationActionConfiguration { actionConfigurationId: string; /** * The type of action to take when the budget alert is triggered. (Enum: `EMAIL_NOTIFICATION`) */ actionType?: string; /** * The target of the action. For `EMAIL_NOTIFICATION`, this is the email address to send the notification to. */ target?: string; } export interface BudgetFilter { /** * List of tags to filter by. Consists of the following fields: */ tags?: outputs.BudgetFilterTag[]; /** * Filter by workspace ID (if empty, include usage all usage for this account). Consists of the following fields: */ workspaceId?: outputs.BudgetFilterWorkspaceId; } export interface BudgetFilterTag { /** * The key of the tag. */ key?: string; /** * Consists of the following fields: */ value?: outputs.BudgetFilterTagValue; } export interface BudgetFilterTagValue { /** * The operator to use for the filter. (Enum: `IN`) */ operator?: string; /** * The values to filter by. */ values?: string[]; } export interface BudgetFilterWorkspaceId { /** * The operator to use for the filter. (Enum: `IN`) */ operator?: string; /** * The values to filter by. */ values?: number[]; } export interface BudgetPolicyCustomTag { /** * The key of the tag. 
* - Must be unique among all custom tags of the same policy * - Cannot be “budget-policy-name”, “budget-policy-id” or "budget-policy-resolution-result" - * these tags are preserved */ key: string; /** * The value of the tag */ value?: string; } export interface CatalogEffectivePredictiveOptimizationFlag { inheritedFromName?: string; inheritedFromType?: string; value: string; } export interface CatalogProvisioningInfo { state?: string; } export interface CleanRoomAssetForeignTable { /** * (list of ColumnInfo) - The metadata information of the columns in the view */ columns: outputs.CleanRoomAssetForeignTableColumn[]; } export interface CleanRoomAssetForeignTableColumn { comment?: string; mask?: outputs.CleanRoomAssetForeignTableColumnMask; /** * A fully qualified name that uniquely identifies the asset within the clean room. * This is also the name displayed in the clean room UI. * * For UC securable assets (tables, volumes, etc.), the format is *shared_catalog*.*shared_schema*.*asset_name* * * For notebooks, the name is the notebook file name. * For jar analyses, the name is the jar analysis name */ name?: string; /** * Whether field may be Null (default: true) */ nullable?: boolean; /** * Partition index for column */ partitionIndex?: number; /** * Ordinal position of column (starting at position 0) */ position?: number; /** * Format of IntervalType */ typeIntervalType?: string; /** * Full data type specification, JSON-serialized */ typeJson?: string; /** * . 
Possible values are: `ARRAY`, `BINARY`, `BOOLEAN`, `BYTE`, `CHAR`, `DATE`, `DECIMAL`, `DOUBLE`, `FLOAT`, `GEOGRAPHY`, `GEOMETRY`, `INT`, `INTERVAL`, `LONG`, `MAP`, `NULL`, `SHORT`, `STRING`, `STRUCT`, `TABLE_TYPE`, `TIMESTAMP`, `TIMESTAMP_NTZ`, `USER_DEFINED_TYPE`, `VARIANT` */ typeName?: string; /** * Digits of precision; required for DecimalTypes */ typePrecision?: number; /** * Digits to right of decimal; Required for DecimalTypes */ typeScale?: number; /** * Full data type specification as SQL/catalogString text */ typeText?: string; } export interface CleanRoomAssetForeignTableColumnMask { /** * The full name of the column mask SQL UDF */ functionName?: string; /** * The list of additional table columns to be passed as input to the column mask function. The * first arg of the mask function should be of the type of the column being masked and the * types of the rest of the args should match the types of columns in 'using_column_names' */ usingColumnNames?: string[]; } export interface CleanRoomAssetForeignTableLocalDetails { localName: string; } export interface CleanRoomAssetNotebook { /** * (string) - Server generated etag that represents the notebook version */ etag: string; /** * Base 64 representation of the notebook contents. * This is the same format as returned by :method:workspace/export with the format of **HTML** */ notebookContent: string; /** * (string) - Top-level status derived from all reviews. Possible values are: `APPROVED`, `PENDING`, `REJECTED` */ reviewState: string; /** * (list of CleanRoomNotebookReview) - All existing approvals or rejections */ reviews: outputs.CleanRoomAssetNotebookReview[]; /** * Aliases of collaborators that can run the notebook */ runnerCollaboratorAliases?: string[]; } export interface CleanRoomAssetNotebookReview { comment?: string; /** * When the review was submitted, in epoch milliseconds */ createdAtMillis?: number; /** * (string) - Top-level status derived from all reviews. 
Possible values are: `APPROVED`, `PENDING`, `REJECTED` */
    reviewState?: string;
    /**
     * Specified when the review was not explicitly made by a user. Possible values are: `AUTO_APPROVED`, `BACKFILLED`
     */
    reviewSubReason?: string;
    /**
     * Collaborator alias of the reviewer
     */
    reviewerCollaboratorAlias?: string;
}
export interface CleanRoomAssetTable {
    /**
     * (list of ColumnInfo) - The metadata information of the columns in the table
     */
    columns: outputs.CleanRoomAssetTableColumn[];
}
export interface CleanRoomAssetTableColumn {
    comment?: string;
    mask?: outputs.CleanRoomAssetTableColumnMask;
    /**
     * Name of the column.
     * NOTE(review): upstream doc was copy-pasted from the asset-name description; verify against the Databricks API.
     */
    name?: string;
    /**
     * Whether field may be Null (default: true)
     */
    nullable?: boolean;
    /**
     * Partition index for column
     */
    partitionIndex?: number;
    /**
     * Ordinal position of column (starting at position 0)
     */
    position?: number;
    /**
     * Format of IntervalType
     */
    typeIntervalType?: string;
    /**
     * Full data type specification, JSON-serialized
     */
    typeJson?: string;
    /**
     * Possible values are: `ARRAY`, `BINARY`, `BOOLEAN`, `BYTE`, `CHAR`, `DATE`, `DECIMAL`, `DOUBLE`, `FLOAT`, `GEOGRAPHY`, `GEOMETRY`, `INT`, `INTERVAL`, `LONG`, `MAP`, `NULL`, `SHORT`, `STRING`, `STRUCT`, `TABLE_TYPE`, `TIMESTAMP`, `TIMESTAMP_NTZ`, `USER_DEFINED_TYPE`, `VARIANT`
     */
    typeName?: string;
    /**
     * Digits of precision; required for DecimalTypes
     */
    typePrecision?: number;
    /**
     * Digits to right of decimal; Required for DecimalTypes
     */
    typeScale?: number;
    /**
     * Full data type specification as SQL/catalogString text
     */
    typeText?: string;
}
export interface CleanRoomAssetTableColumnMask {
    /**
     * The full name of the column mask SQL UDF
     */
    functionName?: string;
    /**
     * The list of additional table columns to be passed as input to the column mask function. The
     * first arg of the mask function should be of the type of the column being masked and the
     * types of the rest of the args should match the types of columns in 'using_column_names'
     */
    usingColumnNames?: string[];
}
export interface CleanRoomAssetTableLocalDetails {
    // Local name of the shared table — NOTE(review): undocumented upstream; presumably the UC name in the local metastore, verify.
    localName: string;
    /**
     * Partition filtering specification for a shared table
     */
    partitions?: outputs.CleanRoomAssetTableLocalDetailsPartition[];
}
export interface CleanRoomAssetTableLocalDetailsPartition {
    /**
     * The list of partition value specifications making up this partition filter.
     * NOTE(review): upstream doc was copy-pasted from the `value` field of PartitionValue; verify.
     */
    values?: outputs.CleanRoomAssetTableLocalDetailsPartitionValue[];
}
export interface CleanRoomAssetTableLocalDetailsPartitionValue {
    /**
     * The name of the partition column.
     * NOTE(review): upstream doc was copy-pasted from the asset-name description; verify against the Databricks API.
     */
    name?: string;
    /**
     * The operator to apply for the value.
     * Possible values are: `EQUAL`, `LIKE`
     */
    op?: string;
    /**
     * The key of a Delta Sharing recipient's property. For example "databricks-account-id".
     * When this field is set, field `value` can not be set
     */
    recipientPropertyKey?: string;
    /**
     * The value of the partition column. When this value is not set, it means `null` value.
     * When this field is set, field `recipientPropertyKey` can not be set
     */
    value?: string;
}
export interface CleanRoomAssetView {
    /**
     * (list of ColumnInfo) - The metadata information of the columns in the view
     */
    columns: outputs.CleanRoomAssetViewColumn[];
}
export interface CleanRoomAssetViewColumn {
    comment?: string;
    mask?: outputs.CleanRoomAssetViewColumnMask;
    /**
     * Name of the column.
     * NOTE(review): upstream doc was copy-pasted from the asset-name description; verify against the Databricks API.
     */
    name?: string;
    /**
     * Whether field may be Null (default: true)
     */
    nullable?: boolean;
    /**
     * Partition index for column
     */
    partitionIndex?: number;
    /**
     * Ordinal position of column (starting at position 0)
     */
    position?: number;
    /**
     * Format of IntervalType
     */
    typeIntervalType?: string;
    /**
     * Full data type specification, JSON-serialized
     */
    typeJson?: string;
    /**
     * .
Possible values are: `ARRAY`, `BINARY`, `BOOLEAN`, `BYTE`, `CHAR`, `DATE`, `DECIMAL`, `DOUBLE`, `FLOAT`, `GEOGRAPHY`, `GEOMETRY`, `INT`, `INTERVAL`, `LONG`, `MAP`, `NULL`, `SHORT`, `STRING`, `STRUCT`, `TABLE_TYPE`, `TIMESTAMP`, `TIMESTAMP_NTZ`, `USER_DEFINED_TYPE`, `VARIANT` */
    typeName?: string;
    /**
     * Digits of precision; required for DecimalTypes
     */
    typePrecision?: number;
    /**
     * Digits to right of decimal; Required for DecimalTypes
     */
    typeScale?: number;
    /**
     * Full data type specification as SQL/catalogString text
     */
    typeText?: string;
}
export interface CleanRoomAssetViewColumnMask {
    /**
     * The full name of the column mask SQL UDF
     */
    functionName?: string;
    /**
     * The list of additional table columns to be passed as input to the column mask function. The
     * first arg of the mask function should be of the type of the column being masked and the
     * types of the rest of the args should match the types of columns in 'using_column_names'
     */
    usingColumnNames?: string[];
}
export interface CleanRoomAssetViewLocalDetails {
    localName: string;
}
export interface CleanRoomAssetVolumeLocalDetails {
    localName: string;
}
export interface CleanRoomsCleanRoomOutputCatalog {
    /**
     * The name of the output catalog in UC.
     * It should follow [UC securable naming requirements](https://docs.databricks.com/en/data-governance/unity-catalog/index.html#securable-object-naming-requirements).
     * The field will always exist if status is CREATED
     */
    catalogName?: string;
    /**
     * (string) - Possible values are: `CREATED`, `NOT_CREATED`, `NOT_ELIGIBLE`
     */
    status: string;
}
export interface CleanRoomsCleanRoomRemoteDetailedInfo {
    /**
     * (string) - Central clean room ID
     */
    centralCleanRoomId: string;
    /**
     * Cloud vendor (aws,azure,gcp) of the central clean room
     */
    cloudVendor?: string;
    /**
     * Collaborators in the central clean room. There should be one and only one collaborator
     * in the list that satisfies the owner condition:
     *
     * 1. It has the creator's globalMetastoreId (determined by caller of CreateCleanRoom).
     *
     * 2. Its inviteRecipientEmail is empty
     */
    collaborators?: outputs.CleanRoomsCleanRoomRemoteDetailedInfoCollaborator[];
    /**
     * (ComplianceSecurityProfile)
     */
    complianceSecurityProfile: outputs.CleanRoomsCleanRoomRemoteDetailedInfoComplianceSecurityProfile;
    /**
     * (CleanRoomCollaborator) - Collaborator who creates the clean room
     */
    creator: outputs.CleanRoomsCleanRoomRemoteDetailedInfoCreator;
    /**
     * Egress network policy to apply to the central clean room workspace
     */
    egressNetworkPolicy?: outputs.CleanRoomsCleanRoomRemoteDetailedInfoEgressNetworkPolicy;
    region?: string;
}
export interface CleanRoomsCleanRoomRemoteDetailedInfoCollaborator {
    /**
     * Collaborator alias specified by the clean room creator. It is unique across all collaborators of this clean room, and used to derive
     * multiple values internally such as catalog alias and clean room name for single metastore clean rooms.
     * It should follow [UC securable naming requirements](https://docs.databricks.com/en/data-governance/unity-catalog/index.html#securable-object-naming-requirements)
     */
    collaboratorAlias: string;
    /**
     * (string) - Generated display name for the collaborator. In the case of a single metastore clean room, it is the clean
     * room name. For x-metastore clean rooms, it is the organization name of the metastore. It is not restricted to
     * these values and could change in the future
     */
    displayName: string;
    /**
     * The global Unity Catalog metastore ID of the collaborator. The identifier is of format cloud:region:metastore-uuid
     */
    globalMetastoreId?: string;
    /**
     * Email of the user who is receiving the clean room "invitation". It should be empty
     * for the creator of the clean room, and non-empty for the invitees of the clean room.
     * It is only returned in the output when clean room creator calls GET
     */
    inviteRecipientEmail?: string;
    /**
     * Workspace ID of the user who is receiving the clean room "invitation". Must be specified if
     * inviteRecipientEmail is specified.
     * It should be empty when the collaborator is the creator of the clean room
     */
    inviteRecipientWorkspaceId: number;
    /**
     * (string) - Organization name
     * configured in the metastore
     */
    organizationName: string;
}
export interface CleanRoomsCleanRoomRemoteDetailedInfoComplianceSecurityProfile {
    /**
     * The list of compliance standards that the compliance security profile is configured to enforce
     */
    complianceStandards?: string[];
    /**
     * Whether the compliance security profile is enabled
     */
    isEnabled?: boolean;
}
export interface CleanRoomsCleanRoomRemoteDetailedInfoCreator {
    /**
     * Collaborator alias specified by the clean room creator. It is unique across all collaborators of this clean room, and used to derive
     * multiple values internally such as catalog alias and clean room name for single metastore clean rooms.
     * It should follow [UC securable naming requirements](https://docs.databricks.com/en/data-governance/unity-catalog/index.html#securable-object-naming-requirements)
     */
    collaboratorAlias: string;
    /**
     * (string) - Generated display name for the collaborator. In the case of a single metastore clean room, it is the clean
     * room name. For x-metastore clean rooms, it is the organization name of the metastore. It is not restricted to
     * these values and could change in the future
     */
    displayName: string;
    /**
     * The global Unity Catalog metastore ID of the collaborator. The identifier is of format cloud:region:metastore-uuid
     */
    globalMetastoreId?: string;
    /**
     * Email of the user who is receiving the clean room "invitation". It should be empty
     * for the creator of the clean room, and non-empty for the invitees of the clean room.
     * It is only returned in the output when clean room creator calls GET
     */
    inviteRecipientEmail?: string;
    /**
     * Workspace ID of the user who is receiving the clean room "invitation". Must be specified if
     * inviteRecipientEmail is specified.
     * It should be empty when the collaborator is the creator of the clean room
     */
    inviteRecipientWorkspaceId: number;
    /**
     * (string) - Organization name
     * configured in the metastore
     */
    organizationName: string;
}
export interface CleanRoomsCleanRoomRemoteDetailedInfoEgressNetworkPolicy {
    /**
     * The access policy enforced for egress traffic to the internet
     */
    internetAccess?: outputs.CleanRoomsCleanRoomRemoteDetailedInfoEgressNetworkPolicyInternetAccess;
}
export interface CleanRoomsCleanRoomRemoteDetailedInfoEgressNetworkPolicyInternetAccess {
    allowedInternetDestinations?: outputs.CleanRoomsCleanRoomRemoteDetailedInfoEgressNetworkPolicyInternetAccessAllowedInternetDestination[];
    allowedStorageDestinations?: outputs.CleanRoomsCleanRoomRemoteDetailedInfoEgressNetworkPolicyInternetAccessAllowedStorageDestination[];
    /**
     * Optional. If not specified, assume the policy is enforced for all workloads
     */
    logOnlyMode?: outputs.CleanRoomsCleanRoomRemoteDetailedInfoEgressNetworkPolicyInternetAccessLogOnlyMode;
    /**
     * Possible values are: `FULL_ACCESS`, `PRIVATE_ACCESS_ONLY`, `RESTRICTED_ACCESS`
     */
    restrictionMode?: string;
}
export interface CleanRoomsCleanRoomRemoteDetailedInfoEgressNetworkPolicyInternetAccessAllowedInternetDestination {
    destination?: string;
    /**
     * Possible values are: `TCP`
     */
    protocol?: string;
    type?: string;
}
export interface CleanRoomsCleanRoomRemoteDetailedInfoEgressNetworkPolicyInternetAccessAllowedStorageDestination {
    allowedPaths?: string[];
    azureContainer?: string;
    azureDnsZone?: string;
    azureStorageAccount?: string;
    azureStorageService?: string;
    bucketName?: string;
    region?: string;
    type?: string;
}
export interface CleanRoomsCleanRoomRemoteDetailedInfoEgressNetworkPolicyInternetAccessLogOnlyMode {
    /**
     * Possible values are: `ALL_SERVICES`, `SELECTED_SERVICES`
     */
    logOnlyModeType?: string;
    workloads?: string[];
}
export interface ClusterAutoscale {
    /**
     * The maximum number of workers to which the cluster can scale up when overloaded.
maxWorkers must be strictly greater than min_workers.
     *
     * To create a [single node cluster](https://docs.databricks.com/clusters/single-node.html), set `isSingleNode = true` and `kind = "CLASSIC_PREVIEW"` for the cluster. Single-node clusters are suitable for small, non-distributed workloads like single-node machine learning use-cases.
     *
     * ```typescript
     * import * as pulumi from "@pulumi/pulumi";
     * import * as databricks from "@pulumi/databricks";
     *
     * const smallest = databricks.getNodeType({
     *     localDisk: true,
     * });
     * const latestLts = databricks.getSparkVersion({
     *     longTermSupport: true,
     * });
     * const singleNode = new databricks.Cluster("single_node", {
     *     clusterName: "Single Node",
     *     sparkVersion: latestLts.then(latestLts => latestLts.id),
     *     nodeTypeId: smallest.then(smallest => smallest.id),
     *     autoterminationMinutes: 20,
     *     isSingleNode: true,
     *     kind: "CLASSIC_PREVIEW",
     * });
     * ```
     */
    maxWorkers?: number;
    /**
     * The minimum number of workers to which the cluster can scale down when underutilized. It is also the initial number of workers the cluster will have after creation.
     */
    minWorkers?: number;
}
export interface ClusterAwsAttributes {
    /**
     * Availability type used for all subsequent nodes past the `firstOnDemand` ones. Valid values are `SPOT`, `SPOT_WITH_FALLBACK` and `ON_DEMAND`. Note: If `firstOnDemand` is zero, this availability type will be used for the entire cluster. Backend default value is `SPOT_WITH_FALLBACK` and could change in the future
     */
    availability?: string;
    /**
     * The number of volumes launched for each instance. You can choose up to 10 volumes. This feature is only enabled for supported node types. Legacy node types cannot specify custom EBS volumes. For node types with no instance store, at least one EBS volume needs to be specified; otherwise, cluster creation will fail. These EBS volumes will be mounted at /ebs0, /ebs1, and etc. Instance store volumes will be mounted at /local_disk0, /local_disk1, and etc.
If EBS volumes are attached, Databricks will configure Spark to use only the EBS volumes for scratch storage because heterogeneously sized scratch devices can lead to inefficient disk utilization. If no EBS volumes are attached, Databricks will configure Spark to use instance store volumes. If EBS volumes are specified, then the Spark configuration spark.local.dir will be overridden. */ ebsVolumeCount?: number; ebsVolumeIops?: number; /** * The size of each EBS volume (in GiB) launched for each instance. For general purpose SSD, this value must be within the range 100 - 4096. For throughput optimized HDD, this value must be within the range 500 - 4096. Custom EBS volumes cannot be specified for the legacy node types (memory-optimized and compute-optimized). */ ebsVolumeSize?: number; ebsVolumeThroughput?: number; /** * The type of EBS volumes that will be launched with this cluster. Valid values are `GENERAL_PURPOSE_SSD` or `THROUGHPUT_OPTIMIZED_HDD`. Use this option only if you're not picking *Delta Optimized `i3.*`* node types. */ ebsVolumeType?: string; /** * The first `firstOnDemand` node