// googleapis: Google APIs Client Library for Node.js
/// <reference types="node" />
import { OAuth2Client, JWT, Compute, UserRefreshClient, BaseExternalAccountClient, GaxiosPromise, GoogleConfigurable, MethodOptions, StreamMethodOptions, GlobalOptions, GoogleAuth, BodyResponseCallback, APIRequestContext } from 'googleapis-common';
import { Readable } from 'stream';
export declare namespace dataproc_v1 {
export interface Options extends GlobalOptions {
version: 'v1';
}
interface StandardParameters {
/**
* Auth client or API Key for the request
*/
auth?: string | OAuth2Client | JWT | Compute | UserRefreshClient | BaseExternalAccountClient | GoogleAuth;
/**
* V1 error format.
*/
'$.xgafv'?: string;
/**
* OAuth access token.
*/
access_token?: string;
/**
* Data format for response.
*/
alt?: string;
/**
* JSONP
*/
callback?: string;
/**
* Selector specifying which fields to include in a partial response.
*/
fields?: string;
/**
* API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
*/
key?: string;
/**
* OAuth 2.0 token for the current user.
*/
oauth_token?: string;
/**
* Returns response with indentations and line breaks.
*/
prettyPrint?: boolean;
/**
* Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
*/
quotaUser?: string;
/**
* Legacy upload protocol for media (e.g. "media", "multipart").
*/
uploadType?: string;
/**
* Upload protocol for media (e.g. "raw", "multipart").
*/
upload_protocol?: string;
}
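/**
 * Standard parameters apply to every method in this API. A minimal sketch
 * (all values are illustrative): trimming the response with a fields
 * selector and disabling pretty printing to reduce payload size.
 *
 * ```js
 * // Hypothetical options object; any method accepts these
 * // StandardParameters alongside its own parameters.
 * const standardParams = {
 *   fields: 'clusters(clusterName,status)', // partial response selector
 *   prettyPrint: false,                     // smaller, unformatted JSON
 *   quotaUser: 'user-1234',                 // arbitrary per-user quota key
 * };
 * ```
 */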
/**
* Cloud Dataproc API
*
* Manages Hadoop-based clusters and jobs on Google Cloud Platform.
*
* @example
* ```js
* const {google} = require('googleapis');
* const dataproc = google.dataproc('v1');
* ```
*/
export class Dataproc {
context: APIRequestContext;
projects: Resource$Projects;
constructor(options: GlobalOptions, google?: GoogleConfigurable);
}
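/**
 * A slightly fuller sketch than the example above, assuming Application
 * Default Credentials and the projects.regions.clusters resource declared
 * later in this file. Project and region values are placeholders.
 *
 * ```js
 * const {google} = require('googleapis');
 *
 * async function listClusters() {
 *   // The cloud-platform scope covers the Dataproc API.
 *   const auth = new google.auth.GoogleAuth({
 *     scopes: ['https://www.googleapis.com/auth/cloud-platform'],
 *   });
 *   const dataproc = google.dataproc({version: 'v1', auth});
 *   const res = await dataproc.projects.regions.clusters.list({
 *     projectId: 'my-project',   // placeholder
 *     region: 'us-central1',
 *   });
 *   console.log(res.data.clusters);
 * }
 * ```
 */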
/**
* Specifies the type and number of accelerator cards attached to the instances of an instance group. See GPUs on Compute Engine (https://cloud.google.com/compute/docs/gpus/).
*/
export interface Schema$AcceleratorConfig {
/**
* The number of the accelerator cards of this type exposed to this instance.
*/
acceleratorCount?: number | null;
/**
* Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See Compute Engine AcceleratorTypes (https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes). Examples:
* https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-k80
* projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-k80
* nvidia-tesla-k80
* Auto Zone Exception: If you are using the Dataproc Auto Zone Placement (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, nvidia-tesla-k80.
*/
acceleratorTypeUri?: string | null;
}
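/**
 * A minimal Schema$AcceleratorConfig value (values are illustrative). The
 * short type name shown here is the form required when Auto Zone Placement
 * is in use.
 *
 * ```js
 * const acceleratorConfig = {
 *   acceleratorTypeUri: 'nvidia-tesla-k80', // short name form
 *   acceleratorCount: 2,
 * };
 * ```
 */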
/**
* Autoscaling Policy config associated with the cluster.
*/
export interface Schema$AutoscalingConfig {
/**
* Optional. The autoscaling policy used by the cluster. Only resource names including project id and location (region) are valid. Examples:
* https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]
* projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]
* Note that the policy must be in the same project and Dataproc region.
*/
policyUri?: string | null;
}
/**
* Describes an autoscaling policy for Dataproc cluster autoscaler.
*/
export interface Schema$AutoscalingPolicy {
basicAlgorithm?: Schema$BasicAutoscalingAlgorithm;
/**
* Required. The policy id. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters.
*/
id?: string | null;
/**
* Optional. The labels to associate with this autoscaling policy. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with an autoscaling policy.
*/
labels?: {
[key: string]: string;
} | null;
/**
* Output only. The "resource name" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. For projects.regions.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id\}/regions/{region\}/autoscalingPolicies/{policy_id\} For projects.locations.autoscalingPolicies, the resource name of the policy has the following format: projects/{project_id\}/locations/{location\}/autoscalingPolicies/{policy_id\}
*/
name?: string | null;
/**
* Optional. Describes how the autoscaler will operate for secondary workers.
*/
secondaryWorkerConfig?: Schema$InstanceGroupAutoscalingPolicyConfig;
/**
* Required. Describes how the autoscaler will operate for primary workers.
*/
workerConfig?: Schema$InstanceGroupAutoscalingPolicyConfig;
}
/**
* Node group identification and configuration information.
*/
export interface Schema$AuxiliaryNodeGroup {
/**
* Required. Node group configuration.
*/
nodeGroup?: Schema$NodeGroup;
/**
* Optional. A node group ID. Generated if not specified. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of 3 to 33 characters.
*/
nodeGroupId?: string | null;
}
/**
* Auxiliary services configuration for a Cluster.
*/
export interface Schema$AuxiliaryServicesConfig {
/**
* Optional. The Hive Metastore configuration for this workload.
*/
metastoreConfig?: Schema$MetastoreConfig;
/**
* Optional. The Spark History Server configuration for the workload.
*/
sparkHistoryServerConfig?: Schema$SparkHistoryServerConfig;
}
/**
* Basic algorithm for autoscaling.
*/
export interface Schema$BasicAutoscalingAlgorithm {
/**
* Optional. Duration between scaling events. A scaling period starts after the update operation from the previous event has completed. Bounds: 2m, 1d. Default: 2m.
*/
cooldownPeriod?: string | null;
/**
* Optional. Spark Standalone autoscaling configuration
*/
sparkStandaloneConfig?: Schema$SparkStandaloneAutoscalingConfig;
/**
* Optional. YARN autoscaling configuration.
*/
yarnConfig?: Schema$BasicYarnAutoscalingConfig;
}
/**
* Basic autoscaling configurations for YARN.
*/
export interface Schema$BasicYarnAutoscalingConfig {
/**
* Required. Timeout for YARN graceful decommissioning of Node Managers. Specifies the duration to wait for jobs to complete before forcefully removing workers (and potentially interrupting jobs). Only applicable to downscaling operations. Bounds: 0s, 1d.
*/
gracefulDecommissionTimeout?: string | null;
/**
* Required. Fraction of average YARN pending memory in the last cooldown period for which to remove workers. A scale-down factor of 1 will result in scaling down so that there is no available memory remaining after the update (more aggressive scaling). A scale-down factor of 0 disables removing workers, which can be beneficial for autoscaling a single job. See How autoscaling works (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) for more information. Bounds: 0.0, 1.0.
*/
scaleDownFactor?: number | null;
/**
* Optional. Minimum scale-down threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-down for the cluster to scale. A threshold of 0 means the autoscaler will scale down on any recommended change. Bounds: 0.0, 1.0. Default: 0.0.
*/
scaleDownMinWorkerFraction?: number | null;
/**
* Required. Fraction of average YARN pending memory in the last cooldown period for which to add workers. A scale-up factor of 1.0 will result in scaling up so that there is no pending memory remaining after the update (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling). See How autoscaling works (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works) for more information. Bounds: 0.0, 1.0.
*/
scaleUpFactor?: number | null;
/**
* Optional. Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change. Bounds: 0.0, 1.0. Default: 0.0.
*/
scaleUpMinWorkerFraction?: number | null;
}
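/**
 * A sketch of a Schema$AutoscalingPolicy built from the YARN algorithm
 * above. Values are illustrative and sit within the documented bounds; the
 * minInstances/maxInstances fields on the worker config are assumed names
 * from Schema$InstanceGroupAutoscalingPolicyConfig, declared later in this
 * file.
 *
 * ```js
 * const autoscalingPolicy = {
 *   id: 'example-policy',
 *   basicAlgorithm: {
 *     cooldownPeriod: '120s', // within bounds 2m..1d
 *     yarnConfig: {
 *       gracefulDecommissionTimeout: '3600s',
 *       scaleUpFactor: 0.5,
 *       scaleDownFactor: 1.0,
 *       scaleUpMinWorkerFraction: 0.0,
 *       scaleDownMinWorkerFraction: 0.0,
 *     },
 *   },
 *   workerConfig: {minInstances: 2, maxInstances: 10}, // assumed field names
 * };
 * ```
 */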
/**
* A representation of a batch workload in the service.
*/
export interface Schema$Batch {
/**
* Output only. The time when the batch was created.
*/
createTime?: string | null;
/**
* Output only. The email address of the user who created the batch.
*/
creator?: string | null;
/**
* Optional. Environment configuration for the batch execution.
*/
environmentConfig?: Schema$EnvironmentConfig;
/**
* Optional. The labels to associate with this batch. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a batch.
*/
labels?: {
[key: string]: string;
} | null;
/**
* Output only. The resource name of the batch.
*/
name?: string | null;
/**
* Output only. The resource name of the operation associated with this batch.
*/
operation?: string | null;
/**
* Optional. PySpark batch config.
*/
pysparkBatch?: Schema$PySparkBatch;
/**
* Optional. Runtime configuration for the batch execution.
*/
runtimeConfig?: Schema$RuntimeConfig;
/**
* Output only. Runtime information about batch execution.
*/
runtimeInfo?: Schema$RuntimeInfo;
/**
* Optional. Spark batch config.
*/
sparkBatch?: Schema$SparkBatch;
/**
* Optional. SparkR batch config.
*/
sparkRBatch?: Schema$SparkRBatch;
/**
* Optional. SparkSql batch config.
*/
sparkSqlBatch?: Schema$SparkSqlBatch;
/**
* Output only. The state of the batch.
*/
state?: string | null;
/**
* Output only. Historical state information for the batch.
*/
stateHistory?: Schema$StateHistory[];
/**
* Output only. Batch state details, such as a failure description if the state is FAILED.
*/
stateMessage?: string | null;
/**
* Output only. The time when the batch entered a current state.
*/
stateTime?: string | null;
/**
* Output only. A batch UUID (Unique Universal Identifier). The service generates this value when it creates the batch.
*/
uuid?: string | null;
}
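/**
 * Only the Optional fields are supplied when creating a batch; the Output
 * only fields are filled in by the service. A hedged sketch of a PySpark
 * batch payload: mainPythonFileUri belongs to Schema$PySparkBatch, declared
 * later in this file, and the bucket path is a placeholder.
 *
 * ```js
 * const batch = {
 *   pysparkBatch: {
 *     mainPythonFileUri: 'gs://my-bucket/word_count.py', // assumed field
 *   },
 *   labels: {env: 'dev'}, // keys and values must conform to RFC 1035
 * };
 * ```
 */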
/**
* Metadata describing the Batch operation.
*/
export interface Schema$BatchOperationMetadata {
/**
* Name of the batch for the operation.
*/
batch?: string | null;
/**
* Batch UUID for the operation.
*/
batchUuid?: string | null;
/**
* The time when the operation was created.
*/
createTime?: string | null;
/**
* Short description of the operation.
*/
description?: string | null;
/**
* The time when the operation finished.
*/
doneTime?: string | null;
/**
* Labels associated with the operation.
*/
labels?: {
[key: string]: string;
} | null;
/**
* The operation type.
*/
operationType?: string | null;
/**
* Warnings encountered during operation execution.
*/
warnings?: string[] | null;
}
/**
* Associates members, or principals, with a role.
*/
export interface Schema$Binding {
/**
* The condition that is associated with this binding. If the condition evaluates to true, then this binding applies to the current request. If the condition evaluates to false, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).
*/
condition?: Schema$Expr;
/**
* Specifies the principals requesting access for a Google Cloud resource. members can have the following values:
* allUsers: A special identifier that represents anyone who is on the internet; with or without a Google account.
* allAuthenticatedUsers: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation.
* user:{emailid\}: An email address that represents a specific Google account. For example, alice@example.com.
* serviceAccount:{emailid\}: An email address that represents a Google service account. For example, my-other-app@appspot.gserviceaccount.com.
* serviceAccount:{projectid\}.svc.id.goog[{namespace\}/{kubernetes-sa\}]: An identifier for a Kubernetes service account (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, my-project.svc.id.goog[my-namespace/my-kubernetes-sa].
* group:{emailid\}: An email address that represents a Google group. For example, admins@example.com.
* domain:{domain\}: The G Suite domain (primary) that represents all the users of that domain. For example, google.com or example.com.
* deleted:user:{emailid\}?uid={uniqueid\}: An email address (plus unique identifier) representing a user that has been recently deleted. For example, alice@example.com?uid=123456789012345678901. If the user is recovered, this value reverts to user:{emailid\} and the recovered user retains the role in the binding.
* deleted:serviceAccount:{emailid\}?uid={uniqueid\}: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901. If the service account is undeleted, this value reverts to serviceAccount:{emailid\} and the undeleted service account retains the role in the binding.
* deleted:group:{emailid\}?uid={uniqueid\}: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, admins@example.com?uid=123456789012345678901. If the group is recovered, this value reverts to group:{emailid\} and the recovered group retains the role in the binding.
*/
members?: string[] | null;
/**
* Role that is assigned to the list of members, or principals. For example, roles/viewer, roles/editor, or roles/owner.
*/
role?: string | null;
}
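/**
 * A sketch of a conditional Schema$Binding (values are illustrative). The
 * condition uses the Schema$Expr shape documented below; when a condition
 * is present, GetPolicyOptions.requestedPolicyVersion must be 3.
 *
 * ```js
 * const binding = {
 *   role: 'roles/viewer',
 *   members: [
 *     'user:alice@example.com',
 *     'serviceAccount:my-other-app@appspot.gserviceaccount.com',
 *   ],
 *   condition: {
 *     title: 'expires-2030',
 *     expression: 'request.time < timestamp("2030-01-01T00:00:00Z")',
 *   },
 * };
 * ```
 */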
/**
* A request to cancel a job.
*/
export interface Schema$CancelJobRequest {
}
/**
* Describes the identifying information, config, and status of a Dataproc cluster
*/
export interface Schema$Cluster {
/**
* Required. The cluster name, which must be unique within a project. The name must start with a lowercase letter, and can contain up to 51 lowercase letters, numbers, and hyphens. It cannot end with a hyphen. The name of a deleted cluster can be reused.
*/
clusterName?: string | null;
/**
* Output only. A cluster UUID (Unique Universal Identifier). Dataproc generates this value when it creates the cluster.
*/
clusterUuid?: string | null;
/**
* Optional. The cluster config for a cluster of Compute Engine Instances. Note that Dataproc may set default values, and values may change when clusters are updated. Exactly one of ClusterConfig or VirtualClusterConfig must be specified.
*/
config?: Schema$ClusterConfig;
/**
* Optional. The labels to associate with this cluster. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a cluster.
*/
labels?: {
[key: string]: string;
} | null;
/**
* Output only. Contains cluster daemon metrics such as HDFS and YARN stats. Beta Feature: This report is available for testing purposes only. It may be changed before final release.
*/
metrics?: Schema$ClusterMetrics;
/**
* Required. The Google Cloud Platform project ID that the cluster belongs to.
*/
projectId?: string | null;
/**
* Output only. Cluster status.
*/
status?: Schema$ClusterStatus;
/**
* Output only. The previous cluster status.
*/
statusHistory?: Schema$ClusterStatus[];
/**
* Optional. The virtual cluster config is used when creating a Dataproc cluster that does not directly control the underlying compute resources, for example, when creating a Dataproc-on-GKE cluster (https://cloud.google.com/dataproc/docs/guides/dpgke/dataproc-gke-overview). Dataproc may set default values, and values may change when clusters are updated. Exactly one of config or virtual_cluster_config must be specified.
*/
virtualClusterConfig?: Schema$VirtualClusterConfig;
}
/**
* The cluster config.
*/
export interface Schema$ClusterConfig {
/**
* Optional. Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset.
*/
autoscalingConfig?: Schema$AutoscalingConfig;
/**
* Optional. The node group settings.
*/
auxiliaryNodeGroups?: Schema$AuxiliaryNodeGroup[];
/**
* Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.
*/
configBucket?: string | null;
/**
* Optional. The config for Dataproc metrics.
*/
dataprocMetricConfig?: Schema$DataprocMetricConfig;
/**
* Optional. Encryption settings for the cluster.
*/
encryptionConfig?: Schema$EncryptionConfig;
/**
* Optional. Port/endpoint configuration for this cluster
*/
endpointConfig?: Schema$EndpointConfig;
/**
* Optional. The shared Compute Engine config settings for all instances in a cluster.
*/
gceClusterConfig?: Schema$GceClusterConfig;
/**
* Optional. BETA. The Kubernetes Engine config for Dataproc clusters deployed to Kubernetes. These config settings are mutually exclusive with Compute Engine-based options, such as gce_cluster_config, master_config, worker_config, secondary_worker_config, and autoscaling_config.
*/
gkeClusterConfig?: Schema$GkeClusterConfig;
/**
* Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's role metadata to run an executable on a master or worker node, as shown below using curl (you can also use wget):
* ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role)
* if [[ "${ROLE\}" == 'Master' ]]; then
* ... master specific actions ...
* else
* ... worker specific actions ...
* fi
*/
initializationActions?: Schema$NodeInitializationAction[];
/**
* Optional. Lifecycle setting for the cluster.
*/
lifecycleConfig?: Schema$LifecycleConfig;
/**
* Optional. The Compute Engine config settings for the cluster's master instance.
*/
masterConfig?: Schema$InstanceGroupConfig;
/**
* Optional. Metastore configuration.
*/
metastoreConfig?: Schema$MetastoreConfig;
/**
* Optional. The Compute Engine config settings for a cluster's secondary worker instances
*/
secondaryWorkerConfig?: Schema$InstanceGroupConfig;
/**
* Optional. Security settings for the cluster.
*/
securityConfig?: Schema$SecurityConfig;
/**
* Optional. The config settings for cluster software.
*/
softwareConfig?: Schema$SoftwareConfig;
/**
* Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket (see Dataproc staging and temp buckets (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.
*/
tempBucket?: string | null;
/**
* Optional. The Compute Engine config settings for the cluster's worker instances.
*/
workerConfig?: Schema$InstanceGroupConfig;
}
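/**
 * A minimal Schema$Cluster payload using this config (all values are
 * placeholders). numInstances and machineTypeUri are assumed field names on
 * Schema$InstanceGroupConfig, which is declared later in this file.
 *
 * ```js
 * const cluster = {
 *   projectId: 'my-project',
 *   clusterName: 'example-cluster',
 *   config: {
 *     gceClusterConfig: {zoneUri: 'us-central1-a'},
 *     masterConfig: {numInstances: 1, machineTypeUri: 'n1-standard-4'},
 *     workerConfig: {numInstances: 2, machineTypeUri: 'n1-standard-4'},
 *   },
 * };
 * ```
 */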
/**
* Contains cluster daemon metrics, such as HDFS and YARN stats. Beta Feature: This report is available for testing purposes only. It may be changed before final release.
*/
export interface Schema$ClusterMetrics {
/**
* The HDFS metrics.
*/
hdfsMetrics?: {
[key: string]: string;
} | null;
/**
* YARN metrics.
*/
yarnMetrics?: {
[key: string]: string;
} | null;
}
/**
* The cluster operation triggered by a workflow.
*/
export interface Schema$ClusterOperation {
/**
* Output only. Indicates the operation is done.
*/
done?: boolean | null;
/**
* Output only. Error, if operation failed.
*/
error?: string | null;
/**
* Output only. The id of the cluster operation.
*/
operationId?: string | null;
}
/**
* Metadata describing the operation.
*/
export interface Schema$ClusterOperationMetadata {
/**
* Output only. Child operation ids
*/
childOperationIds?: string[] | null;
/**
* Output only. Name of the cluster for the operation.
*/
clusterName?: string | null;
/**
* Output only. Cluster UUID for the operation.
*/
clusterUuid?: string | null;
/**
* Output only. Short description of operation.
*/
description?: string | null;
/**
* Output only. Labels associated with the operation
*/
labels?: {
[key: string]: string;
} | null;
/**
* Output only. The operation type.
*/
operationType?: string | null;
/**
* Output only. Current operation status.
*/
status?: Schema$ClusterOperationStatus;
/**
* Output only. The previous operation status.
*/
statusHistory?: Schema$ClusterOperationStatus[];
/**
* Output only. Errors encountered during operation execution.
*/
warnings?: string[] | null;
}
/**
* The status of the operation.
*/
export interface Schema$ClusterOperationStatus {
/**
* Output only. A message containing any operation metadata details.
*/
details?: string | null;
/**
* Output only. A message containing the detailed operation state.
*/
innerState?: string | null;
/**
* Output only. A message containing the operation state.
*/
state?: string | null;
/**
* Output only. The time this state was entered.
*/
stateStartTime?: string | null;
}
/**
* A selector that chooses target cluster for jobs based on metadata.
*/
export interface Schema$ClusterSelector {
/**
* Required. The cluster labels. Cluster must have all labels to match.
*/
clusterLabels?: {
[key: string]: string;
} | null;
/**
* Optional. The zone where the workflow process executes. This parameter does not affect the selection of the cluster. If unspecified, the zone of the first cluster matching the selector is used.
*/
zone?: string | null;
}
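/**
 * A sketch of a Schema$ClusterSelector: the target cluster must carry every
 * label listed. Label keys and values here are illustrative.
 *
 * ```js
 * const clusterSelector = {
 *   clusterLabels: {env: 'prod', team: 'data'},
 *   zone: 'us-central1-a', // optional; does not affect cluster selection
 * };
 * ```
 */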
/**
* The status of a cluster and its instances.
*/
export interface Schema$ClusterStatus {
/**
* Optional. Output only. Details of cluster's state.
*/
detail?: string | null;
/**
* Output only. The cluster's state.
*/
state?: string | null;
/**
* Output only. Time when this state was entered (see JSON representation of Timestamp (https://developers.google.com/protocol-buffers/docs/proto3#json)).
*/
stateStartTime?: string | null;
/**
* Output only. Additional state information that includes status reported by the agent.
*/
substate?: string | null;
}
/**
* Confidential Instance Config for clusters using Confidential VMs (https://cloud.google.com/compute/confidential-vm/docs)
*/
export interface Schema$ConfidentialInstanceConfig {
/**
* Optional. Defines whether the instance should have confidential compute enabled.
*/
enableConfidentialCompute?: boolean | null;
}
/**
* Dataproc metric config.
*/
export interface Schema$DataprocMetricConfig {
/**
* Required. Metrics sources to enable.
*/
metrics?: Schema$Metric[];
}
/**
* A request to collect cluster diagnostic information.
*/
export interface Schema$DiagnoseClusterRequest {
/**
* Optional. Time interval in which diagnosis should be carried out on the cluster.
*/
diagnosisInterval?: Schema$Interval;
/**
* Optional. DEPRECATED Specifies the job on which diagnosis is to be performed. Format: projects/{project\}/regions/{region\}/jobs/{job\}
*/
job?: string | null;
/**
* Optional. Specifies a list of jobs on which diagnosis is to be performed. Format: projects/{project\}/regions/{region\}/jobs/{job\}
*/
jobs?: string[] | null;
/**
* Optional. DEPRECATED Specifies the yarn application on which diagnosis is to be performed.
*/
yarnApplicationId?: string | null;
/**
* Optional. Specifies a list of yarn applications on which diagnosis is to be performed.
*/
yarnApplicationIds?: string[] | null;
}
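/**
 * A hedged sketch of a Schema$DiagnoseClusterRequest using the
 * non-deprecated list fields. startTime and endTime are assumed field names
 * on Schema$Interval, which is declared later in this file; the job name is
 * a placeholder in the documented format.
 *
 * ```js
 * const diagnoseRequest = {
 *   diagnosisInterval: {
 *     startTime: '2024-01-01T00:00:00Z', // assumed Schema$Interval fields
 *     endTime: '2024-01-02T00:00:00Z',
 *   },
 *   jobs: ['projects/my-project/regions/us-central1/jobs/job-1234'],
 * };
 * ```
 */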
/**
* The location of diagnostic output.
*/
export interface Schema$DiagnoseClusterResults {
/**
* Output only. The Cloud Storage URI of the diagnostic output. The output report is a plain text file with a summary of collected diagnostics.
*/
outputUri?: string | null;
}
/**
* Specifies the config of disk options for a group of VM instances.
*/
export interface Schema$DiskConfig {
/**
* Optional. Size in GB of the boot disk (default is 500GB).
*/
bootDiskSizeGb?: number | null;
/**
* Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive), "pd-ssd" (Persistent Disk Solid State Drive), or "pd-standard" (Persistent Disk Hard Disk Drive). See Disk types (https://cloud.google.com/compute/docs/disks#disk-types).
*/
bootDiskType?: string | null;
/**
* Optional. Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), "nvme" (Non-Volatile Memory Express). See local SSD performance (https://cloud.google.com/compute/docs/disks/local-ssd#performance).
*/
localSsdInterface?: string | null;
/**
* Optional. Number of attached SSDs, from 0 to 8 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries. Note: Local SSD options may vary by machine type and number of vCPUs selected.
*/
numLocalSsds?: number | null;
}
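/**
 * A minimal Schema$DiskConfig sketch combining the documented options
 * (values are illustrative): an SSD boot disk plus two NVMe local SSDs for
 * HDFS data and runtime logs.
 *
 * ```js
 * const diskConfig = {
 *   bootDiskType: 'pd-ssd',
 *   bootDiskSizeGb: 1000,
 *   numLocalSsds: 2,           // 0-8; 0 keeps data on the boot disk
 *   localSsdInterface: 'nvme',
 * };
 * ```
 */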
/**
* Driver scheduling configuration.
*/
export interface Schema$DriverSchedulingConfig {
/**
* Required. The amount of memory in MB the driver is requesting.
*/
memoryMb?: number | null;
/**
* Required. The number of vCPUs the driver is requesting.
*/
vcores?: number | null;
}
/**
* A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); \}
*/
export interface Schema$Empty {
}
/**
* Encryption settings for the cluster.
*/
export interface Schema$EncryptionConfig {
/**
* Optional. The Cloud KMS key name to use for PD disk encryption for all instances in the cluster.
*/
gcePdKmsKeyName?: string | null;
/**
* Optional. The Cloud KMS key name to use for encrypting customer core content and cluster PD disk for all instances in the cluster.
*/
kmsKey?: string | null;
}
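/**
 * A sketch of Schema$EncryptionConfig with a customer-managed key. The key
 * path is a placeholder in the standard Cloud KMS resource format.
 *
 * ```js
 * const encryptionConfig = {
 *   gcePdKmsKeyName:
 *     'projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key',
 * };
 * ```
 */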
/**
* Endpoint config for this cluster
*/
export interface Schema$EndpointConfig {
/**
* Optional. If true, enable http access to specific ports on the cluster from external sources. Defaults to false.
*/
enableHttpPortAccess?: boolean | null;
/**
* Output only. The map of port descriptions to URLs. Will only be populated if enable_http_port_access is true.
*/
httpPorts?: {
[key: string]: string;
} | null;
}
/**
* Environment configuration for a workload.
*/
export interface Schema$EnvironmentConfig {
/**
* Optional. Execution configuration for a workload.
*/
executionConfig?: Schema$ExecutionConfig;
/**
* Optional. Peripherals configuration that workload has access to.
*/
peripheralsConfig?: Schema$PeripheralsConfig;
}
/**
* Execution configuration for a workload.
*/
export interface Schema$ExecutionConfig {
/**
* Optional. Applies to sessions only. The duration to keep the session alive while it's idling. Exceeding this threshold causes the session to terminate. This field cannot be set on a batch workload. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). Defaults to 4 hours if not set. If both ttl and idle_ttl are specified for an interactive session, the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idle_ttl or when ttl has been exceeded, whichever occurs first.
*/
idleTtl?: string | null;
/**
* Optional. The Cloud KMS key to use for encryption.
*/
kmsKey?: string | null;
/**
* Optional. Tags used for network traffic control.
*/
networkTags?: string[] | null;
/**
* Optional. Network URI to connect workload to.
*/
networkUri?: string | null;
/**
* Optional. Service account used to execute the workload.
*/
serviceAccount?: string | null;
/**
* Optional. A Cloud Storage bucket used to stage workload dependencies, config files, and store workload output and other ephemeral data, such as Spark history files. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location according to the region where your workload is running, and then create and manage project-level, per-location staging and temporary buckets. This field requires a Cloud Storage bucket name, not a gs://... URI to a Cloud Storage bucket.
*/
stagingBucket?: string | null;
/**
* Optional. Subnetwork URI to connect workload to.
*/
subnetworkUri?: string | null;
/**
* Optional. The duration after which the workload will be terminated. When the workload exceeds this duration, it will be unconditionally terminated without waiting for ongoing work to finish. If ttl is not specified for a batch workload, the workload will be allowed to run until it exits naturally (or runs forever without exiting). If ttl is not specified for an interactive session, it defaults to 24h. If ttl is not specified for a batch that uses 2.1+ runtime version, it defaults to 4h. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of Duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). If both ttl and idle_ttl are specified (for an interactive session), the conditions are treated as OR conditions: the workload will be terminated when it has been idle for idle_ttl or when ttl has been exceeded, whichever occurs first.
*/
ttl?: string | null;
}
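/**
 * A sketch of a session-oriented Schema$ExecutionConfig (values are
 * placeholders). Durations use the JSON Duration form; note that
 * stagingBucket takes a bare bucket name, not a gs:// URI.
 *
 * ```js
 * const executionConfig = {
 *   serviceAccount: 'workload-sa@my-project.iam.gserviceaccount.com',
 *   subnetworkUri: 'projects/my-project/regions/us-central1/subnetworks/default',
 *   idleTtl: '14400s', // 4 hours, the documented session default
 *   ttl: '86400s',     // 24 hours; whichever limit is hit first terminates
 *   stagingBucket: 'my-staging-bucket',
 * };
 * ```
 */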
/**
* Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec.
* Example (Comparison): title: "Summary size limit" description: "Determines if a summary is less than 100 chars" expression: "document.summary.size() < 100"
* Example (Equality): title: "Requestor is owner" description: "Determines if requestor is the document owner" expression: "document.owner == request.auth.claims.email"
* Example (Logic): title: "Public documents" description: "Determine whether the document should be publicly visible" expression: "document.type != 'private' && document.type != 'internal'"
* Example (Data Manipulation): title: "Notification string" description: "Create a notification string with a timestamp." expression: "'New message received at ' + string(document.create_time)"
* The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.
*/
export interface Schema$Expr {
/**
* Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.
*/
description?: string | null;
/**
* Textual representation of an expression in Common Expression Language syntax.
*/
expression?: string | null;
/**
* Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.
*/
location?: string | null;
/**
* Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.
*/
title?: string | null;
}
/**
* A Dataproc job for running Apache Flink (https://flink.apache.org/) applications on YARN.
*/
export interface Schema$FlinkJob {
/**
* Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
*/
args?: string[] | null;
/**
* Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Flink driver and tasks.
*/
jarFileUris?: string[] | null;
/**
* Optional. The runtime log config for job execution.
*/
loggingConfig?: Schema$LoggingConfig;
/**
* The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jar_file_uris.
*/
mainClass?: string | null;
/**
* The HCFS URI of the jar file that contains the main class.
*/
mainJarFileUri?: string | null;
/**
* Optional. A mapping of property names to values, used to configure Flink. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/flink/conf/flink-defaults.conf and classes in user code.
*/
properties?: {
[key: string]: string;
} | null;
/**
* Optional. HCFS URI of the savepoint which contains the last saved progress for this job
*/
savepointUri?: string | null;
}
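/**
 * A sketch of a Schema$FlinkJob submitting a jar by main class (all URIs
 * and names are placeholders). Flags such as --conf belong in properties,
 * not in args.
 *
 * ```js
 * const flinkJob = {
 *   mainClass: 'org.example.WordCount',
 *   jarFileUris: ['gs://my-bucket/word-count.jar'],
 *   args: ['gs://my-bucket/input.txt'],
 *   properties: {'taskmanager.numberOfTaskSlots': '2'},
 * };
 * ```
 */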
/**
* Common config settings for resources of Compute Engine cluster instances, applicable to all instances in the cluster.
*/
export interface Schema$GceClusterConfig {
/**
* Optional. Confidential Instance Config for clusters using Confidential VMs (https://cloud.google.com/compute/confidential-vm/docs).
*/
confidentialInstanceConfig?: Schema$ConfidentialInstanceConfig;
/**
* Optional. If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This internal_ip_only restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses.
*/
internalIpOnly?: boolean | null;
/**
* Optional. The Compute Engine metadata entries to add to all instances (see Project and instance metadata (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
*/
metadata?: {
[key: string]: string;
} | null;
/**
* Optional. The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither network_uri nor subnetwork_uri is specified, the "default" network of the project is used, if it exists. Cannot be a "Custom Subnet Network" (see Using Subnetworks (https://cloud.google.com/compute/docs/subnetworks) for more information). A full URL, partial URI, or short name are valid. Examples:
* https://www.googleapis.com/compute/v1/projects/[project_id]/global/networks/default
* projects/[project_id]/global/networks/default
* default
*/
networkUri?: string | null;
/**
* Optional. Node Group Affinity for sole-tenant clusters.
*/
nodeGroupAffinity?: Schema$NodeGroupAffinity;
/**
* Optional. The type of IPv6 access for a cluster.
*/
privateIpv6GoogleAccess?: string | null;
/**
* Optional. Reservation Affinity for consuming Zonal reservation.
*/
reservationAffinity?: Schema$ReservationAffinity;
/**
* Optional. The Dataproc service account (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc) (also see VM Data Plane identity (https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) used by Dataproc cluster VM instances to access Google Cloud Platform services. If not specified, the Compute Engine default service account (https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used.
*/
serviceAccount?: string | null;
/**
* Optional. The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is always included:
* https://www.googleapis.com/auth/cloud.useraccounts.readonly
* https://www.googleapis.com/auth/devstorage.read_write
* https://www.googleapis.com/auth/logging.write
* If no scopes are specified, the following defaults are also provided:
* https://www.googleapis.com/auth/bigquery
* https://www.googleapis.com/auth/bigtable.admin.table
* https://www.googleapis.com/auth/bigtable.data
* https://www.googleapis.com/auth/devstorage.full_control
*/
serviceAccountScopes?: string[] | null;
/**
* Optional. Shielded Instance Config for clusters using Compute Engine Shielded VMs (https://cloud.google.com/security/shielded-cloud/shielded-vm).
*/
shieldedInstanceConfig?: Schema$ShieldedInstanceConfig;
/**
* Optional. The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. A full URL, partial URI, or short name are valid. Examples:
* https://www.googleapis.com/compute/v1/projects/[project_id]/regions/[region]/subnetworks/sub0
* projects/[project_id]/regions/[region]/subnetworks/sub0
* sub0
*/
subnetworkUri?: string | null;
/**
* The Compute Engine tags to add to all instances (see Tagging instances (https://cloud.google.com/compute/docs/label-or-tag-resources#tags)).
*/
tags?: string[] | null;
/**
* Optional. The Compute Engine zone where the Dataproc cluster will be located. If omitted, the service will pick a zone in the cluster's Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples:
* https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]
* projects/[project_id]/zones/[zone]
* [zone]
*/
zoneUri?: string | null;
}
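/**
 * A sketch of a locked-down Schema$GceClusterConfig (values are
 * illustrative): internal IPs only, a dedicated service account, and an
 * explicit subnetwork, following the URI forms documented above.
 *
 * ```js
 * const gceClusterConfig = {
 *   zoneUri: 'us-central1-a',
 *   subnetworkUri: 'projects/my-project/regions/us-central1/subnetworks/sub0',
 *   internalIpOnly: true, // off-cluster dependencies must not need external IPs
 *   serviceAccount: 'cluster-sa@my-project.iam.gserviceaccount.com',
 *   tags: ['dataproc-node'],
 * };
 * ```
 */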
/**
* Request message for GetIamPolicy method.
*/
export interface Schema$GetIamPolicyRequest {
/**
* OPTIONAL: A GetPolicyOptions object for specifying options to GetIamPolicy.
*/
options?: Schema$GetPolicyOptions;
}
/**
* Encapsulates settings provided to GetIamPolicy.
*/
export interface Schema$GetPolicyOptions {
/**
* Optional. The maximum policy version that will be used to format the policy. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional role bindings must specify version 3. Policies with no conditional role bindings may specify any valid value or leave the field unset. The policy in the response might use the policy version that you specified, or it might use a lower policy version. For example, if you specify version 3, but the policy has no conditional role bindings, the response uses version 1. To learn which resources support conditions in their IAM policies, see the IAM documentation (https://cloud.google.com/iam/help/conditions/resource-policies).
*/
requestedPolicyVersion?: number | null;
}
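/**
 * A minimal Schema$GetIamPolicyRequest: version 3 is required whenever the
 * policy may contain conditional role bindings (see Schema$Binding above).
 *
 * ```js
 * const getIamPolicyRequest = {
 *   options: {requestedPolicyVersion: 3},
 * };
 * ```
 */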
/**
* The cluster's GKE config.
*/
export interface Schema$GkeClusterConfig {
/**
* Optional. A target GKE cluster to deploy to. It must be in the same project and region as the Dataproc cluster (the GKE cluster can be zonal or regional). Format: 'projects/{project\}/locations/{location\}/clusters/{cluster_id\}'
*/
gkeClusterTarget?: string | null;
/**
* Optional. Deprecated. Use gkeClusterTarget. Used only for the deprecated beta. A target for the deployment.
*/
namespacedGkeDeploymentTarget?: Schema$NamespacedGkeDeploymentTarget;
/**
* Optional. GKE node pools where workloads will be scheduled. At least one node pool must be assigned the DEFAULT GkeNodePoolTarget.Role. If a GkeNodePoolTarget is not specified, Dataproc constructs a DEFAULT GkeNodePoolTarget. Each role can be given to only one GkeNodePoolTarget. All node pools must have the same location settings.
*/
nodePoolTarget?: Schema$GkeNodePoolTarget[];
}
/**
* Parameters that describe cluster nodes.
*/
export interface Schema$GkeNodeConfig {
/**
* Optional. A list of hardware accelerators (https://cloud.google.com/compute/docs/gpus) to attach to each node.
*/
accelerators?: Schema$GkeNodePoolAcceleratorConfig[];
/**
* Optional. The Customer Managed Encryption Key (CMEK) (https://cloud.google.com/kubernetes-engine/docs/how-to/using-cmek) used to encrypt the boot disk attached to each node in the node pool. Specify the key using the following format: projects/{project\}/locations/{location\}/keyRings/{key_ring\}/cryptoKeys/{crypto_key\}
*/
bootDiskKmsKey?: string | null;
/**
* Optional. The number of local SSD disks to attach to the node, which is limited by the maximum number of disks allowable per zone (see Adding Local SSDs (https://cloud.google.com/compute/docs/disks/local-ssd)).
*/
localSsdCount?: number | null;
/**
* Optional. The name of a Compute Engine machine type (https://cloud.google.com/compute/docs/machine-types).
*/
machineType?: string | null;
/**
* Optional. Minimum CPU platform (https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform) to be used by this instance. The instance may be scheduled on the specified or a newer CPU platform. Specify the friendly names of CPU platforms, such as "Intel Haswell" or "Intel Sandy Bridge".
*/
minCpuPlatform?: string | null;
/**
* Optional. Whether the nodes are created as legacy preemptible VM instances (https://cloud.google.com/compute/docs/instances/preemptible). Also see Spot VMs, preemptible VM instances without a maximum lifetime. Legacy and Spot preemptible nodes cannot be used in a node pool with the CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role).
*/
preemptible?: boolean | null;
/**
* Optional. Whether the nodes are created as Spot VM instances (https://cloud.google.com/compute/docs/instances/spot). Spot VMs are the latest update to legacy preemptible VMs. Spot VMs do not have a maximum lifetime. Legacy and Spot preemptible nodes cannot be used in a node pool with the CONTROLLER role or in the DEFAULT node pool if the CONTROLLER role is not assigned (the DEFAULT node pool will assume the CONTROLLER role).
*/
spot?: boolean | null;
}