@pulumi/databricks
A Pulumi package for creating and managing Databricks cloud resources.
"use strict";
// *** WARNING: this file was generated by pulumi-language-nodejs. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
Object.defineProperty(exports, "__esModule", { value: true });
exports.Job = void 0;
const pulumi = require("@pulumi/pulumi");
const utilities = require("./utilities");
/**
 * The `databricks.Job` resource allows you to manage [Databricks Jobs](https://docs.databricks.com/jobs.html) to run non-interactive code in a databricks.Cluster.
*
* > This resource can only be used with a workspace-level provider!
*
* ## Example Usage
*
 * > In Pulumi configuration, it is recommended to define tasks in alphabetical order of their `taskKey` arguments, so that you get a consistent and readable diff. Whenever tasks are added or removed, or a `taskKey` is renamed, you'll observe a change in the majority of tasks, because the current version of the provider treats `task` blocks as an ordered list. Alternatively, the `task` block could have been modeled as an unordered set, but then end users would see the entire block replaced upon a change in a single property of a task.
*
* It is possible to create [a Databricks job](https://docs.databricks.com/aws/en/jobs/) using `task` blocks. A single task is defined with the `task` block containing one of the `*_task` blocks, `taskKey`, and additional arguments described below.
*
* ```typescript
* import * as pulumi from "@pulumi/pulumi";
* import * as databricks from "@pulumi/databricks";
*
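 * // Note: `latest`, `smallest`, `shared`, `thisDatabricksNotebook`, and `thisDatabricksPipeline`
 * // refer to resources or data-source lookups assumed to be defined elsewhere in the program.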
* const _this = new databricks.Job("this", {
* name: "Job with multiple tasks",
* description: "This job executes multiple tasks on a shared job cluster, which will be provisioned as part of execution, and terminated once all tasks are finished.",
* jobClusters: [{
* jobClusterKey: "j",
* newCluster: {
* numWorkers: 2,
* sparkVersion: latest.id,
* nodeTypeId: smallest.id,
* },
* }],
* tasks: [
* {
* taskKey: "a",
* newCluster: {
* numWorkers: 1,
* sparkVersion: latest.id,
* nodeTypeId: smallest.id,
* },
* notebookTask: {
* notebookPath: thisDatabricksNotebook.path,
* },
* },
* {
* taskKey: "b",
* dependsOns: [{
* taskKey: "a",
* }],
* existingClusterId: shared.id,
* sparkJarTask: {
* mainClassName: "com.acme.data.Main",
* },
* },
* {
* taskKey: "c",
* jobClusterKey: "j",
* notebookTask: {
* notebookPath: thisDatabricksNotebook.path,
* },
* },
* {
* taskKey: "d",
* pipelineTask: {
* pipelineId: thisDatabricksPipeline.id,
* },
* },
* ],
* });
* ```
*
* ## Access Control
*
* By default, all users can create and modify jobs unless an administrator [enables jobs access control](https://docs.databricks.com/administration-guide/access-control/jobs-acl.html). With jobs access control, individual permissions determine a user's abilities.
*
 * * databricks.Permissions can control which groups or individual users are granted the *Can View*, *Can Manage Run*, and *Can Manage* permissions (see the sketch below).
* * databricks.ClusterPolicy can control which kinds of clusters users can create for jobs.
*
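 * For example, a minimal sketch of granting job permissions with `databricks.Permissions` (assuming the `_this` job from the example above; the group names are illustrative assumptions):
 *
 * ```typescript
 * import * as databricks from "@pulumi/databricks";
 *
 * const jobUsage = new databricks.Permissions("job_usage", {
 *     jobId: _this.id,
 *     accessControls: [
 *         {
 *             // Hypothetical group: every workspace user may view the job.
 *             groupName: "users",
 *             permissionLevel: "CAN_VIEW",
 *         },
 *         {
 *             // Hypothetical group: this team may trigger and cancel runs.
 *             groupName: "Data Engineers",
 *             permissionLevel: "CAN_MANAGE_RUN",
 *         },
 *     ],
 * });
 * ```
 *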
* ## Import
*
 * The resource job can be imported using the id of the job:
 *
 * ```hcl
 * import {
 *   to = databricks_job.this
 *   id = "<job-id>"
 * }
 * ```
 *
 * Alternatively, import using the `pulumi import` command:
 *
 * ```sh
 * $ pulumi import databricks:index/job:Job this <job-id>
 * ```
*/
class Job extends pulumi.CustomResource {
/**
* Get an existing Job resource's state with the given name, ID, and optional extra
* properties used to qualify the lookup.
*
* @param name The _unique_ name of the resulting resource.
* @param id The _unique_ provider ID of the resource to lookup.
* @param state Any extra arguments used during the lookup.
* @param opts Optional settings to control the behavior of the CustomResource.
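 *
 * A minimal usage sketch (the resource name and job ID below are illustrative placeholders):
 *
 * ```typescript
 * import * as databricks from "@pulumi/databricks";
 *
 * const existing = databricks.Job.get("existing-job", "123456789");
 * ```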
*/
static get(name, id, state, opts) {
return new Job(name, state, { ...opts, id: id });
}
/**
* Returns true if the given object is an instance of Job. This is designed to work even
* when multiple copies of the Pulumi SDK have been loaded into the same process.
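 *
 * A minimal usage sketch; because the declared signature is a type guard, the check also narrows the type:
 *
 * ```typescript
 * import * as pulumi from "@pulumi/pulumi";
 * import * as databricks from "@pulumi/databricks";
 *
 * function jobUrl(res: pulumi.Resource) {
 *     if (databricks.Job.isInstance(res)) {
 *         // res is narrowed to Job here, so its outputs (e.g. url) are available.
 *         return res.url;
 *     }
 *     return undefined;
 * }
 * ```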
*/
static isInstance(obj) {
if (obj === undefined || obj === null) {
return false;
}
return obj['__pulumiType'] === Job.__pulumiType;
}
constructor(name, argsOrState, opts) {
let resourceInputs = {};
opts = opts || {};
if (opts.id) {
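// When an ID is supplied (e.g. from Job.get or an import), populate inputs from any provided state.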
const state = argsOrState;
resourceInputs["alwaysRunning"] = state?.alwaysRunning;
resourceInputs["budgetPolicyId"] = state?.budgetPolicyId;
resourceInputs["continuous"] = state?.continuous;
resourceInputs["controlRunState"] = state?.controlRunState;
resourceInputs["dbtTask"] = state?.dbtTask;
resourceInputs["deployment"] = state?.deployment;
resourceInputs["description"] = state?.description;
resourceInputs["editMode"] = state?.editMode;
resourceInputs["emailNotifications"] = state?.emailNotifications;
resourceInputs["environments"] = state?.environments;
resourceInputs["existingClusterId"] = state?.existingClusterId;
resourceInputs["format"] = state?.format;
resourceInputs["gitSource"] = state?.gitSource;
resourceInputs["health"] = state?.health;
resourceInputs["jobClusters"] = state?.jobClusters;
resourceInputs["libraries"] = state?.libraries;
resourceInputs["maxConcurrentRuns"] = state?.maxConcurrentRuns;
resourceInputs["maxRetries"] = state?.maxRetries;
resourceInputs["minRetryIntervalMillis"] = state?.minRetryIntervalMillis;
resourceInputs["name"] = state?.name;
resourceInputs["newCluster"] = state?.newCluster;
resourceInputs["notebookTask"] = state?.notebookTask;
resourceInputs["notificationSettings"] = state?.notificationSettings;
resourceInputs["parameters"] = state?.parameters;
resourceInputs["performanceTarget"] = state?.performanceTarget;
resourceInputs["pipelineTask"] = state?.pipelineTask;
resourceInputs["providerConfig"] = state?.providerConfig;
resourceInputs["pythonWheelTask"] = state?.pythonWheelTask;
resourceInputs["queue"] = state?.queue;
resourceInputs["retryOnTimeout"] = state?.retryOnTimeout;
resourceInputs["runAs"] = state?.runAs;
resourceInputs["runJobTask"] = state?.runJobTask;
resourceInputs["schedule"] = state?.schedule;
resourceInputs["sparkJarTask"] = state?.sparkJarTask;
resourceInputs["sparkPythonTask"] = state?.sparkPythonTask;
resourceInputs["sparkSubmitTask"] = state?.sparkSubmitTask;
resourceInputs["tags"] = state?.tags;
resourceInputs["tasks"] = state?.tasks;
resourceInputs["timeoutSeconds"] = state?.timeoutSeconds;
resourceInputs["trigger"] = state?.trigger;
resourceInputs["url"] = state?.url;
resourceInputs["usagePolicyId"] = state?.usagePolicyId;
resourceInputs["webhookNotifications"] = state?.webhookNotifications;
}
else {
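// Otherwise this is a new resource: populate inputs from the user-supplied args.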
const args = argsOrState;
resourceInputs["alwaysRunning"] = args?.alwaysRunning;
resourceInputs["budgetPolicyId"] = args?.budgetPolicyId;
resourceInputs["continuous"] = args?.continuous;
resourceInputs["controlRunState"] = args?.controlRunState;
resourceInputs["dbtTask"] = args?.dbtTask;
resourceInputs["deployment"] = args?.deployment;
resourceInputs["description"] = args?.description;
resourceInputs["editMode"] = args?.editMode;
resourceInputs["emailNotifications"] = args?.emailNotifications;
resourceInputs["environments"] = args?.environments;
resourceInputs["existingClusterId"] = args?.existingClusterId;
resourceInputs["format"] = args?.format;
resourceInputs["gitSource"] = args?.gitSource;
resourceInputs["health"] = args?.health;
resourceInputs["jobClusters"] = args?.jobClusters;
resourceInputs["libraries"] = args?.libraries;
resourceInputs["maxConcurrentRuns"] = args?.maxConcurrentRuns;
resourceInputs["maxRetries"] = args?.maxRetries;
resourceInputs["minRetryIntervalMillis"] = args?.minRetryIntervalMillis;
resourceInputs["name"] = args?.name;
resourceInputs["newCluster"] = args?.newCluster;
resourceInputs["notebookTask"] = args?.notebookTask;
resourceInputs["notificationSettings"] = args?.notificationSettings;
resourceInputs["parameters"] = args?.parameters;
resourceInputs["performanceTarget"] = args?.performanceTarget;
resourceInputs["pipelineTask"] = args?.pipelineTask;
resourceInputs["providerConfig"] = args?.providerConfig;
resourceInputs["pythonWheelTask"] = args?.pythonWheelTask;
resourceInputs["queue"] = args?.queue;
resourceInputs["retryOnTimeout"] = args?.retryOnTimeout;
resourceInputs["runAs"] = args?.runAs;
resourceInputs["runJobTask"] = args?.runJobTask;
resourceInputs["schedule"] = args?.schedule;
resourceInputs["sparkJarTask"] = args?.sparkJarTask;
resourceInputs["sparkPythonTask"] = args?.sparkPythonTask;
resourceInputs["sparkSubmitTask"] = args?.sparkSubmitTask;
resourceInputs["tags"] = args?.tags;
resourceInputs["tasks"] = args?.tasks;
resourceInputs["timeoutSeconds"] = args?.timeoutSeconds;
resourceInputs["trigger"] = args?.trigger;
resourceInputs["usagePolicyId"] = args?.usagePolicyId;
resourceInputs["webhookNotifications"] = args?.webhookNotifications;
resourceInputs["url"] = undefined /*out*/;
}
opts = pulumi.mergeOptions(utilities.resourceOptsDefaults(), opts);
super(Job.__pulumiType, name, resourceInputs, opts);
}
}
exports.Job = Job;
/** @internal */
Job.__pulumiType = 'databricks:index/job:Job';
//# sourceMappingURL=job.js.map