@pulumi/databricks

A Pulumi package for creating and managing Databricks cloud resources.

"use strict"; // *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** // *** Do not edit by hand unless you're certain you know what you are doing! *** Object.defineProperty(exports, "__esModule", { value: true }); exports.getNodeTypeOutput = exports.getNodeType = void 0; const pulumi = require("@pulumi/pulumi"); const utilities = require("./utilities"); /** * > **Note** If you have a fully automated setup with workspaces created by databricks.MwsWorkspaces or azurerm_databricks_workspace, please make sure to add dependsOn attribute in order to prevent _default auth: cannot configure default credentials_ errors. * * Gets the smallest node type for databricks.Cluster that fits search criteria, like amount of RAM or number of cores. [AWS](https://databricks.com/product/aws-pricing/instance-types) or [Azure](https://azure.microsoft.com/en-us/pricing/details/databricks/). Internally data source fetches [node types](https://docs.databricks.com/dev-tools/api/latest/clusters.html#list-node-types) available per cloud, similar to executing `databricks clusters list-node-types`, and filters it to return the smallest possible node with criteria. * * > **Note** This is experimental functionality, which aims to simplify things. In case of wrong parameters given (e.g. `minGpus = 876`) or no nodes matching, data source will return cloud-default node type, even though it doesn't match search criteria specified by data source arguments: [i3.xlarge](https://aws.amazon.com/ec2/instance-types/i3/) for AWS or [Standard_D3_v2](https://docs.microsoft.com/en-us/azure/cloud-services/cloud-services-sizes-specs#dv2-series) for Azure. * * ## Example Usage * * ```typescript * import * as pulumi from "@pulumi/pulumi"; * import * as databricks from "@pulumi/databricks"; * * const withGpu = databricks.getNodeType({ * localDisk: true, * minCores: 16, * gbPerCore: 1, * minGpus: 1, * }); * const gpuMl = databricks.getSparkVersion({ * gpu: true, * ml: true, * }); * const research = new databricks.Cluster("research", { * clusterName: "Research Cluster", * sparkVersion: gpuMl.then(gpuMl => gpuMl.id), * nodeTypeId: withGpu.then(withGpu => withGpu.id), * autoterminationMinutes: 20, * autoscale: { * minWorkers: 1, * maxWorkers: 50, * }, * }); * ``` * * ## Related Resources * * The following resources are used in the same context: * * * End to end workspace management guide. * * databricks.Cluster to create [Databricks Clusters](https://docs.databricks.com/clusters/index.html). * * databricks.ClusterPolicy to create a databricks.Cluster policy, which limits the ability to create clusters based on a set of rules. * * databricks.InstancePool to manage [instance pools](https://docs.databricks.com/clusters/instance-pools/index.html) to reduce cluster start and auto-scaling times by maintaining a set of idle, ready-to-use instances. * * databricks.Job to manage [Databricks Jobs](https://docs.databricks.com/jobs.html) to run non-interactive code in a databricks_cluster. 
function getNodeType(args, opts) {
    args = args || {};
    opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts || {});
    return pulumi.runtime.invoke("databricks:index/getNodeType:getNodeType", {
        "category": args.category,
        "fleet": args.fleet,
        "gbPerCore": args.gbPerCore,
        "graviton": args.graviton,
        "id": args.id,
        "isIoCacheEnabled": args.isIoCacheEnabled,
        "localDisk": args.localDisk,
        "localDiskMinSize": args.localDiskMinSize,
        "minCores": args.minCores,
        "minGpus": args.minGpus,
        "minMemoryGb": args.minMemoryGb,
        "photonDriverCapable": args.photonDriverCapable,
        "photonWorkerCapable": args.photonWorkerCapable,
        "supportPortForwarding": args.supportPortForwarding,
    }, opts);
}
exports.getNodeType = getNodeType;
/**
 * Output-returning variant of `getNodeType`. It takes the same arguments, accepts `Input` values, and returns an `Output`, so results can be composed into resources without `.then()` chaining. The notes, example usage, and related resources documented on `getNodeType` above apply here as well.
 */
function getNodeTypeOutput(args, opts) {
    args = args || {};
    opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts || {});
    return pulumi.runtime.invokeOutput("databricks:index/getNodeType:getNodeType", {
        "category": args.category,
        "fleet": args.fleet,
        "gbPerCore": args.gbPerCore,
        "graviton": args.graviton,
        "id": args.id,
        "isIoCacheEnabled": args.isIoCacheEnabled,
        "localDisk": args.localDisk,
        "localDiskMinSize": args.localDiskMinSize,
        "minCores": args.minCores,
        "minGpus": args.minGpus,
        "minMemoryGb": args.minMemoryGb,
        "photonDriverCapable": args.photonDriverCapable,
        "photonWorkerCapable": args.photonWorkerCapable,
        "supportPortForwarding": args.supportPortForwarding,
    }, opts);
}
exports.getNodeTypeOutput = getNodeTypeOutput;
//# sourceMappingURL=getNodeType.js.map
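
The example embedded in the doc comment uses the promise-based `getNodeType` and chains `.then()` to read results. Below is a minimal sketch of the same pattern using `getNodeTypeOutput`, whose `Output` result composes directly into resource arguments. The `getSparkVersionOutput` call and its `longTermSupport` argument are assumptions based on the provider's other data sources, not something shown in this file; verify them against your installed package version.

```typescript
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

// Output-based variant: results compose directly, no .then() chaining needed.
const smallest = databricks.getNodeTypeOutput({
    localDisk: true,
    minCores: 8,
});
// Assumption: getSparkVersionOutput / longTermSupport mirror the
// getSparkVersion data source shown in the doc comment above.
const latestLts = databricks.getSparkVersionOutput({
    longTermSupport: true,
});

const shared = new databricks.Cluster("shared", {
    clusterName: "Shared Autoscaling",
    sparkVersion: latestLts.id,  // property access on Outputs is lifted by Pulumi
    nodeTypeId: smallest.id,
    autoterminationMinutes: 20,
    autoscale: {
        minWorkers: 1,
        maxWorkers: 10,
    },
});
```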
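The note at the top of the doc comment recommends adding `dependsOn` when the workspace is created in the same program, so that credentials exist before the lookup runs. A minimal sketch of that wiring, assuming a previously created `databricks.MwsWorkspaces` resource (declared rather than constructed here, since its arguments are out of scope) and a Pulumi SDK recent enough for output-form invokes to accept `dependsOn`:

```typescript
import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

// Hypothetical: a workspace created earlier in the same program
// (constructor arguments elided for brevity).
declare const workspace: databricks.MwsWorkspaces;

// Making the lookup depend on the workspace ensures default credentials are
// configurable before the invoke runs, avoiding
// "default auth: cannot configure default credentials" errors.
// Assumption: dependsOn on output-form invokes requires a recent Pulumi SDK.
const smallest = databricks.getNodeTypeOutput(
    { localDisk: true },
    { dependsOn: [workspace] },
);
```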