@pulumi/databricks

A Pulumi package for creating and managing Databricks cloud resources. The file below is sqlGlobalConfig.js, the generated implementation of the SqlGlobalConfig resource.

"use strict"; // *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** // *** Do not edit by hand unless you're certain you know what you are doing! *** Object.defineProperty(exports, "__esModule", { value: true }); exports.SqlGlobalConfig = void 0; const pulumi = require("@pulumi/pulumi"); const utilities = require("./utilities"); /** * This resource configures the security policy, databricks_instance_profile, and [data access properties](https://docs.databricks.com/sql/admin/data-access-configuration.html) for all databricks.SqlEndpoint of workspace. *Please note that changing parameters of this resource will restart all running databricks_sql_endpoint.* To use this resource you need to be an administrator. * * ## Example Usage * * ### AWS example * * ```typescript * import * as pulumi from "@pulumi/pulumi"; * import * as databricks from "@pulumi/databricks"; * * const _this = new databricks.SqlGlobalConfig("this", { * securityPolicy: "DATA_ACCESS_CONTROL", * instanceProfileArn: "arn:....", * dataAccessConfig: { * "spark.sql.session.timeZone": "UTC", * }, * }); * ``` * * ### Azure example * * For Azure you should use the `dataAccessConfig` to provide the service principal configuration. You can use the Databricks SQL Admin Console UI to help you generate the right configuration values. * * ```typescript * import * as pulumi from "@pulumi/pulumi"; * import * as databricks from "@pulumi/databricks"; * * const _this = new databricks.SqlGlobalConfig("this", { * securityPolicy: "DATA_ACCESS_CONTROL", * dataAccessConfig: { * "spark.hadoop.fs.azure.account.auth.type": "OAuth", * "spark.hadoop.fs.azure.account.oauth.provider.type": "org.apache.hadoop.fs.azurebfs.oauth2.ClientCredsTokenProvider", * "spark.hadoop.fs.azure.account.oauth2.client.id": applicationId, * "spark.hadoop.fs.azure.account.oauth2.client.secret": `{{secrets/${secretScope}/${secretKey}}}`, * "spark.hadoop.fs.azure.account.oauth2.client.endpoint": `https://login.microsoftonline.com/${tenantId}/oauth2/token`, * }, * sqlConfigParams: { * ANSI_MODE: "true", * }, * }); * ``` * * ## Related Resources * * The following resources are often used in the same context: * * * End to end workspace management guide. * * databricks.InstanceProfile to manage AWS EC2 instance profiles that users can launch databricks.Cluster and access data, like databricks_mount. * * databricks.SqlDashboard to manage Databricks SQL [Dashboards](https://docs.databricks.com/sql/user/dashboards/index.html). * * databricks.SqlEndpoint to manage Databricks SQL [Endpoints](https://docs.databricks.com/sql/admin/sql-endpoints.html). * * databricks.SqlPermissions to manage data object access control lists in Databricks workspaces for things like tables, views, databases, and [more](https://docs.databricks.com/security/access-control/table-acls/object-privileges.html). * * ## Import * * You can import a `databricks_sql_global_config` resource with command like the following (you need to use `global` as ID): * * bash * * ```sh * $ pulumi import databricks:index/sqlGlobalConfig:SqlGlobalConfig this global * ``` */ class SqlGlobalConfig extends pulumi.CustomResource { /** * Get an existing SqlGlobalConfig resource's state with the given name, ID, and optional extra * properties used to qualify the lookup. * * @param name The _unique_ name of the resulting resource. * @param id The _unique_ provider ID of the resource to lookup. * @param state Any extra arguments used during the lookup. 
* @param opts Optional settings to control the behavior of the CustomResource. */ static get(name, id, state, opts) { return new SqlGlobalConfig(name, state, Object.assign(Object.assign({}, opts), { id: id })); } /** * Returns true if the given object is an instance of SqlGlobalConfig. This is designed to work even * when multiple copies of the Pulumi SDK have been loaded into the same process. */ static isInstance(obj) { if (obj === undefined || obj === null) { return false; } return obj['__pulumiType'] === SqlGlobalConfig.__pulumiType; } constructor(name, argsOrState, opts) { let resourceInputs = {}; opts = opts || {}; if (opts.id) { const state = argsOrState; resourceInputs["dataAccessConfig"] = state ? state.dataAccessConfig : undefined; resourceInputs["enableServerlessCompute"] = state ? state.enableServerlessCompute : undefined; resourceInputs["googleServiceAccount"] = state ? state.googleServiceAccount : undefined; resourceInputs["instanceProfileArn"] = state ? state.instanceProfileArn : undefined; resourceInputs["securityPolicy"] = state ? state.securityPolicy : undefined; resourceInputs["sqlConfigParams"] = state ? state.sqlConfigParams : undefined; } else { const args = argsOrState; resourceInputs["dataAccessConfig"] = args ? args.dataAccessConfig : undefined; resourceInputs["enableServerlessCompute"] = args ? args.enableServerlessCompute : undefined; resourceInputs["googleServiceAccount"] = args ? args.googleServiceAccount : undefined; resourceInputs["instanceProfileArn"] = args ? args.instanceProfileArn : undefined; resourceInputs["securityPolicy"] = args ? args.securityPolicy : undefined; resourceInputs["sqlConfigParams"] = args ? args.sqlConfigParams : undefined; } opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts); super(SqlGlobalConfig.__pulumiType, name, resourceInputs, opts); } } exports.SqlGlobalConfig = SqlGlobalConfig; /** @internal */ SqlGlobalConfig.__pulumiType = 'databricks:index/sqlGlobalConfig:SqlGlobalConfig'; //# sourceMappingURL=sqlGlobalConfig.js.map
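
As a quick illustration of the two static helpers above, here is a minimal TypeScript sketch (assuming the same imports as the doc-comment examples) that looks up the workspace's existing global config by its fixed ID `global` and type-checks the result:

```typescript
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

// "global" is the only valid ID for this singleton resource
// (see the Import section in the doc comment above).
const existing = databricks.SqlGlobalConfig.get("this", "global");

// isInstance works even when multiple copies of the Pulumi SDK are
// loaded into the same process, unlike a plain instanceof check.
if (databricks.SqlGlobalConfig.isInstance(existing)) {
    pulumi.log.info("found the workspace SQL global config");
}
```

Note that `get` only reads existing state into the Pulumi program; it does not create or modify the resource, which is why it routes through the constructor with `opts.id` set.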