@pulumi/databricks
A Pulumi package for creating and managing Databricks cloud resources.
// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
Object.defineProperty(exports, "__esModule", { value: true });
exports.getSparkVersionOutput = exports.getSparkVersion = void 0;
const pulumi = require("@pulumi/pulumi");
const utilities = require("./utilities");
/**
 * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add the `dependsOn` attribute in order to prevent _default auth: cannot configure default credentials_ errors (see the sketch below).
*
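 * One hedged way to express that ordering is to bind the lookup to an explicit
 * provider for the new workspace, so the invoke cannot run before the workspace
 * exists. A minimal sketch (the `workspace` resource and its arguments are
 * assumed, not part of this module):
 *
 * ```typescript
 * import * as databricks from "@pulumi/databricks";
 *
 * declare const workspace: databricks.MwsWorkspaces; // assumed to exist elsewhere
 *
 * // The provider depends on the workspace, so the lookup below does too.
 * const workspaceProvider = new databricks.Provider("workspace", {
 *     host: workspace.workspaceUrl,
 * });
 *
 * const ltsRuntime = databricks.getSparkVersionOutput(
 *     { longTermSupport: true },
 *     { provider: workspaceProvider },
 * );
 * ```
 *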
 * Gets the [Databricks Runtime (DBR)](https://docs.databricks.com/runtime/dbr.html) version that can be used for the `sparkVersion` parameter of databricks.Cluster and other resources. The lookup accepts search criteria such as a specific Spark or Scala version, or an ML or Genomics runtime, similar to executing `databricks clusters spark-versions`, and returns the latest version that matches the criteria. Often used together with the databricks.getNodeType data source.
*
 * > **Note** This is experimental functionality, which aims to simplify things. If conflicting parameters are given (e.g. `ml = true` together with `genomics = true`), the data source will throw an error. Likewise, if the search returns multiple results and `latest = false`, the data source will throw an error.
*
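 * When several runtimes could match the criteria, setting `latest: true`
 * collapses the matches to the newest one instead of erroring; a minimal
 * sketch with illustrative argument values:
 *
 * ```typescript
 * const lts = databricks.getSparkVersion({
 *     longTermSupport: true,
 *     latest: true, // pick the newest of multiple matching versions
 * });
 * ```
 *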
* ## Example Usage
*
* ```typescript
* import * as pulumi from "@pulumi/pulumi";
* import * as databricks from "@pulumi/databricks";
*
* const withGpu = databricks.getNodeType({
* localDisk: true,
* minCores: 16,
* gbPerCore: 1,
* minGpus: 1,
* });
* const gpuMl = databricks.getSparkVersion({
* gpu: true,
* ml: true,
* });
* const research = new databricks.Cluster("research", {
* clusterName: "Research Cluster",
* sparkVersion: gpuMl.then(gpuMl => gpuMl.id),
* nodeTypeId: withGpu.then(withGpu => withGpu.id),
* autoterminationMinutes: 20,
* autoscale: {
* minWorkers: 1,
* maxWorkers: 50,
* },
* });
* ```
*
* ## Related Resources
*
* The following resources are used in the same context:
*
 * * End-to-end workspace management guide.
* * databricks.Cluster to create [Databricks Clusters](https://docs.databricks.com/clusters/index.html).
* * databricks.ClusterPolicy to create a databricks.Cluster policy, which limits the ability to create clusters based on a set of rules.
* * databricks.InstancePool to manage [instance pools](https://docs.databricks.com/clusters/instance-pools/index.html) to reduce cluster start and auto-scaling times by maintaining a set of idle, ready-to-use instances.
 * * databricks.Job to manage [Databricks Jobs](https://docs.databricks.com/jobs.html) to run non-interactive code in a databricks.Cluster.
*/
function getSparkVersion(args, opts) {
args = args || {};
opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts || {});
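    // Each argument maps onto a field of the invoke token below; the call
    // returns a Promise of the resolved result.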
return pulumi.runtime.invoke("databricks:index/getSparkVersion:getSparkVersion", {
"beta": args.beta,
"genomics": args.genomics,
"gpu": args.gpu,
"graviton": args.graviton,
"id": args.id,
"latest": args.latest,
"longTermSupport": args.longTermSupport,
"ml": args.ml,
"photon": args.photon,
"scala": args.scala,
"sparkVersion": args.sparkVersion,
}, opts);
}
exports.getSparkVersion = getSparkVersion;
/**
 * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add the `dependsOn` attribute in order to prevent _default auth: cannot configure default credentials_ errors.
*
 * Gets the [Databricks Runtime (DBR)](https://docs.databricks.com/runtime/dbr.html) version that can be used for the `sparkVersion` parameter of databricks.Cluster and other resources. The lookup accepts search criteria such as a specific Spark or Scala version, or an ML or Genomics runtime, similar to executing `databricks clusters spark-versions`, and returns the latest version that matches the criteria. Often used together with the databricks.getNodeType data source.
*
 * > **Note** This is experimental functionality, which aims to simplify things. If conflicting parameters are given (e.g. `ml = true` together with `genomics = true`), the data source will throw an error. Likewise, if the search returns multiple results and `latest = false`, the data source will throw an error.
*
* ## Example Usage
*
* ```typescript
* import * as pulumi from "@pulumi/pulumi";
* import * as databricks from "@pulumi/databricks";
*
* const withGpu = databricks.getNodeType({
* localDisk: true,
* minCores: 16,
* gbPerCore: 1,
* minGpus: 1,
* });
* const gpuMl = databricks.getSparkVersion({
* gpu: true,
* ml: true,
* });
* const research = new databricks.Cluster("research", {
* clusterName: "Research Cluster",
* sparkVersion: gpuMl.then(gpuMl => gpuMl.id),
* nodeTypeId: withGpu.then(withGpu => withGpu.id),
* autoterminationMinutes: 20,
* autoscale: {
* minWorkers: 1,
* maxWorkers: 50,
* },
* });
* ```
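 *
 * Because this variant returns a pulumi.Output rather than a Promise, the
 * result can also be consumed with `apply` instead of `then`; a minimal sketch:
 *
 * ```typescript
 * const gpuMlOut = databricks.getSparkVersionOutput({ gpu: true, ml: true });
 * const sparkVersionId = gpuMlOut.apply(v => v.id); // usable directly as a resource input
 * ```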
*
* ## Related Resources
*
* The following resources are used in the same context:
*
 * * End-to-end workspace management guide.
* * databricks.Cluster to create [Databricks Clusters](https://docs.databricks.com/clusters/index.html).
* * databricks.ClusterPolicy to create a databricks.Cluster policy, which limits the ability to create clusters based on a set of rules.
* * databricks.InstancePool to manage [instance pools](https://docs.databricks.com/clusters/instance-pools/index.html) to reduce cluster start and auto-scaling times by maintaining a set of idle, ready-to-use instances.
 * * databricks.Job to manage [Databricks Jobs](https://docs.databricks.com/jobs.html) to run non-interactive code in a databricks.Cluster.
*/
function getSparkVersionOutput(args, opts) {
args = args || {};
opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts || {});
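    // Same invoke as getSparkVersion, but invokeOutput wraps the result in a
    // pulumi.Output so it can be passed directly to resource inputs.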
return pulumi.runtime.invokeOutput("databricks:index/getSparkVersion:getSparkVersion", {
"beta": args.beta,
"genomics": args.genomics,
"gpu": args.gpu,
"graviton": args.graviton,
"id": args.id,
"latest": args.latest,
"longTermSupport": args.longTermSupport,
"ml": args.ml,
"photon": args.photon,
"scala": args.scala,
"sparkVersion": args.sparkVersion,
}, opts);
}
exports.getSparkVersionOutput = getSparkVersionOutput;
//# sourceMappingURL=getSparkVersion.js.map