import { OAuth2Client, JWT, Compute, UserRefreshClient, BaseExternalAccountClient, GaxiosResponseWithHTTP2, GoogleConfigurable, MethodOptions, StreamMethodOptions, GlobalOptions, GoogleAuth, BodyResponseCallback, APIRequestContext } from 'googleapis-common'; import { Readable } from 'stream'; export declare namespace dataflow_v1b3 { export interface Options extends GlobalOptions { version: 'v1b3'; } interface StandardParameters { /** * Auth client or API Key for the request */ auth?: string | OAuth2Client | JWT | Compute | UserRefreshClient | BaseExternalAccountClient | GoogleAuth; /** * V1 error format. */ '$.xgafv'?: string; /** * OAuth access token. */ access_token?: string; /** * Data format for response. */ alt?: string; /** * JSONP */ callback?: string; /** * Selector specifying which fields to include in a partial response. */ fields?: string; /** * API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */ key?: string; /** * OAuth 2.0 token for the current user. */ oauth_token?: string; /** * Returns response with indentations and line breaks. */ prettyPrint?: boolean; /** * Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */ quotaUser?: string; /** * Legacy upload protocol for media (e.g. "media", "multipart"). */ uploadType?: string; /** * Upload protocol for media (e.g. "raw", "multipart"). */ upload_protocol?: string; } /** * Dataflow API * * Manages Google Cloud Dataflow projects on Google Cloud Platform. * * @example * ```js * const {google} = require('googleapis'); * const dataflow = google.dataflow('v1b3'); * ``` */ export class Dataflow { context: APIRequestContext; projects: Resource$Projects; constructor(options: GlobalOptions, google?: GoogleConfigurable); } /** * Obsolete in favor of ApproximateReportedProgress and ApproximateSplitRequest. */ export interface Schema$ApproximateProgress { /** * Obsolete. */ percentComplete?: number | null; /** * Obsolete. */ position?: Schema$Position; /** * Obsolete. */ remainingTime?: string | null; } /** * A progress measurement of a WorkItem by a worker. */ export interface Schema$ApproximateReportedProgress { /** * Total amount of parallelism in the portion of input of this task that has already been consumed and is no longer active. In the first two examples above (see remaining_parallelism), the value should be 29 or 2 respectively. The sum of remaining_parallelism and consumed_parallelism should equal the total amount of parallelism in this work item. If specified, must be finite. */ consumedParallelism?: Schema$ReportedParallelism; /** * Completion as fraction of the input consumed, from 0.0 (beginning, nothing consumed), to 1.0 (end of the input, entire input consumed). */ fractionConsumed?: number | null; /** * A Position within the work to represent a progress. */ position?: Schema$Position; /** * Total amount of parallelism in the input of this task that remains, (i.e. can be delegated to this task and any new tasks via dynamic splitting). Always at least 1 for non-finished work items and 0 for finished. "Amount of parallelism" refers to how many non-empty parts of the input can be read in parallel. This does not necessarily equal number of records. An input that can be read in parallel down to the individual records is called "perfectly splittable". 
An example of non-perfectly parallelizable input is a block-compressed file format where a block of records has to be read as a whole, but different blocks can be read in parallel. Examples: * If we are processing record #30 (starting at 1) out of 50 in a perfectly splittable 50-record input, this value should be 21 (20 remaining + 1 current). * If we are reading through block 3 in a block-compressed file consisting of 5 blocks, this value should be 3 (since blocks 4 and 5 can be processed in parallel by new tasks via dynamic splitting and the current task remains processing block 3). * If we are reading through the last block in a block-compressed file, or reading or processing the last record in a perfectly splittable input, this value should be 1, because apart from the current task, no additional remainder can be split off. */ remainingParallelism?: Schema$ReportedParallelism; } /** * A suggestion by the service to the worker to dynamically split the WorkItem. */ export interface Schema$ApproximateSplitRequest { /** * A fraction at which to split the work item, from 0.0 (beginning of the input) to 1.0 (end of the input). */ fractionConsumed?: number | null; /** * The fraction of the remainder of work to split the work item at, from 0.0 (split at the current position) to 1.0 (end of the input). */ fractionOfRemainder?: number | null; /** * A Position at which to split the work item. */ position?: Schema$Position; } /** * A structured message reporting an autoscaling decision made by the Dataflow service. */ export interface Schema$AutoscalingEvent { /** * The current number of workers the job has. */ currentNumWorkers?: string | null; /** * A message describing why the system decided to adjust the current number of workers, why it failed, or why the system decided to not make any changes to the number of workers. */ description?: Schema$StructuredMessage; /** * The type of autoscaling event to report. */ eventType?: string | null; /** * The target number of workers the worker pool wants to resize to use. */ targetNumWorkers?: string | null; /** * The time this event was emitted to indicate a new target or current num_workers value. */ time?: string | null; /** * A short and friendly name for the worker pool this event refers to. */ workerPool?: string | null; } /** * Settings for WorkerPool autoscaling. */ export interface Schema$AutoscalingSettings { /** * The algorithm to use for autoscaling. */ algorithm?: string | null; /** * The maximum number of workers to cap scaling at. */ maxNumWorkers?: number | null; } /** * Exponential buckets where the growth factor between buckets is `2**(2**-scale)`. e.g. for `scale=1` growth factor is `2**(2**(-1))=sqrt(2)`. `n` buckets will have the following boundaries. - 0th: [0, gf) - i in [1, n-1]: [gf^(i), gf^(i+1)) */ export interface Schema$Base2Exponent { /** * Must be greater than 0. */ numberOfBuckets?: number | null; /** * Must be between -3 and 3. This forces the growth factor of the bucket boundaries to be between `2^(1/8)` and `256`. */ scale?: number | null; } /** * Metadata for a BigQuery connector used by the job. */ export interface Schema$BigQueryIODetails { /** * Dataset accessed in the connection. */ dataset?: string | null; /** * Project accessed in the connection. */ projectId?: string | null; /** * Query used to access data in the connection. */ query?: string | null; /** * Table accessed in the connection. */ table?: string | null; } /** * Metadata for a Cloud Bigtable connector used by the job. 
*/ export interface Schema$BigTableIODetails { /** * InstanceId accessed in the connection. */ instanceId?: string | null; /** * ProjectId accessed in the connection. */ projectId?: string | null; /** * TableId accessed in the connection. */ tableId?: string | null; } /** * The message type used for encoding metrics of type bounded trie. */ export interface Schema$BoundedTrie { /** * The maximum number of elements to store before truncation. */ bound?: number | null; /** * A compact representation of all the elements in this trie. */ root?: Schema$BoundedTrieNode; /** * A more efficient representation for metrics consisting of a single value. */ singleton?: string[] | null; } /** * A single node in a BoundedTrie. */ export interface Schema$BoundedTrieNode { /** * Children of this node. Must be empty if truncated is true. */ children?: { [key: string]: Schema$BoundedTrieNode; } | null; /** * Whether this node has been truncated. A truncated leaf represents possibly many children with the same prefix. */ truncated?: boolean | null; } /** * `BucketOptions` describes the bucket boundaries used in the histogram. */ export interface Schema$BucketOptions { /** * Bucket boundaries grow exponentially. */ exponential?: Schema$Base2Exponent; /** * Bucket boundaries grow linearly. */ linear?: Schema$Linear; } /** * Description of an interstitial value between transforms in an execution stage. */ export interface Schema$ComponentSource { /** * Dataflow service generated name for this source. */ name?: string | null; /** * User name for the original user transform or collection with which this source is most closely associated. */ originalTransformOrCollection?: string | null; /** * Human-readable name for this transform; may be user or system generated. */ userName?: string | null; } /** * Description of a transform executed as part of an execution stage. */ export interface Schema$ComponentTransform { /** * Dataflow service generated name for this source. */ name?: string | null; /** * User name for the original user transform with which this transform is most closely associated. */ originalTransform?: string | null; /** * Human-readable name for this transform; may be user or system generated. */ userName?: string | null; } /** * All configuration data for a particular Computation. */ export interface Schema$ComputationTopology { /** * The ID of the computation. */ computationId?: string | null; /** * The inputs to the computation. */ inputs?: Schema$StreamLocation[]; /** * The key ranges processed by the computation. */ keyRanges?: Schema$KeyRangeLocation[]; /** * The outputs from the computation. */ outputs?: Schema$StreamLocation[]; /** * The state family values. */ stateFamilies?: Schema$StateFamilyConfig[]; /** * The system stage name. */ systemStageName?: string | null; } /** * A position that encapsulates an inner position and an index for the inner position. A ConcatPosition can be used by a reader of a source that encapsulates a set of other sources. */ export interface Schema$ConcatPosition { /** * Index of the inner source. */ index?: number | null; /** * Position within the inner source. */ position?: Schema$Position; } /** * Container Spec. */ export interface Schema$ContainerSpec { /** * Default runtime environment for the job. */ defaultEnvironment?: Schema$FlexTemplateRuntimeEnvironment; /** * Name of the docker container image. E.g., gcr.io/project/some-image */ image?: string | null; /** * Cloud Storage path to self-signed certificate of private registry. 
*/ imageRepositoryCertPath?: string | null; /** * Secret Manager secret id for password to authenticate to private registry. */ imageRepositoryPasswordSecretId?: string | null; /** * Secret Manager secret id for username to authenticate to private registry. */ imageRepositoryUsernameSecretId?: string | null; /** * Metadata describing a template including description and validation rules. */ metadata?: Schema$TemplateMetadata; /** * Required. SDK info of the Flex Template. */ sdkInfo?: Schema$SDKInfo; } /** * CounterMetadata includes all static non-name non-value counter attributes. */ export interface Schema$CounterMetadata { /** * Human-readable description of the counter semantics. */ description?: string | null; /** * Counter aggregation kind. */ kind?: string | null; /** * A string referring to the unit type. */ otherUnits?: string | null; /** * System defined Units, see above enum. */ standardUnits?: string | null; } /** * Identifies a counter within a per-job namespace. Counters whose structured names are the same get merged into a single value for the job. */ export interface Schema$CounterStructuredName { /** * Name of the optimized step being executed by the workers. */ componentStepName?: string | null; /** * Name of the stage. An execution step contains multiple component steps. */ executionStepName?: string | null; /** * Index of an input collection that's being read from/written to as a side input. The index identifies a step's side inputs starting by 1 (e.g. the first side input has input_index 1, the third has input_index 3). Side inputs are identified by a pair of (original_step_name, input_index). This field helps uniquely identify them. */ inputIndex?: number | null; /** * Counter name. Not necessarily globally-unique, but unique within the context of the other fields. Required. */ name?: string | null; /** * One of the standard Origins defined above. */ origin?: string | null; /** * The step name requesting an operation, such as GBK. I.e. the ParDo causing a read/write from shuffle to occur, or a read from side inputs. */ originalRequestingStepName?: string | null; /** * System generated name of the original step in the user's graph, before optimization. */ originalStepName?: string | null; /** * A string containing a more specific namespace of the counter's origin. */ originNamespace?: string | null; /** * Portion of this counter, either key or value. */ portion?: string | null; /** * ID of a particular worker. */ workerId?: string | null; } /** * A single message which encapsulates structured name and metadata for a given counter. */ export interface Schema$CounterStructuredNameAndMetadata { /** * Metadata associated with a counter */ metadata?: Schema$CounterMetadata; /** * Structured name of the counter. */ name?: Schema$CounterStructuredName; } /** * An update to a Counter sent from a worker. Next ID: 17 */ export interface Schema$CounterUpdate { /** * Boolean value for And, Or. */ boolean?: boolean | null; /** * Bounded trie data */ boundedTrie?: Schema$BoundedTrie; /** * True if this counter is reported as the total cumulative aggregate value accumulated since the worker started working on this WorkItem. By default this is false, indicating that this counter is reported as a delta. */ cumulative?: boolean | null; /** * Distribution data */ distribution?: Schema$DistributionUpdate; /** * Floating point value for Sum, Max, Min. */ floatingPoint?: number | null; /** * List of floating point numbers, for Set. 
floatingPointList?: Schema$FloatingPointList; /** * Floating point mean aggregation value for Mean. */ floatingPointMean?: Schema$FloatingPointMean; /** * Integer value for Sum, Max, Min. */ integer?: Schema$SplitInt64; /** * Gauge data */ integerGauge?: Schema$IntegerGauge; /** * List of integers, for Set. */ integerList?: Schema$IntegerList; /** * Integer mean aggregation value for Mean. */ integerMean?: Schema$IntegerMean; /** * Value for internally-defined counters used by the Dataflow service. */ internal?: any | null; /** * Counter name and aggregation type. */ nameAndKind?: Schema$NameAndKind; /** * The service-generated short identifier for this counter. The short_id -\> (name, metadata) mapping is constant for the lifetime of a job. */ shortId?: string | null; /** * List of strings, for Set. */ stringList?: Schema$StringList; /** * Counter structured name and metadata. */ structuredNameAndMetadata?: Schema$CounterStructuredNameAndMetadata; } /** * Modeled after information exposed by /proc/stat. */ export interface Schema$CPUTime { /** * Average CPU utilization rate (% non-idle cpu / second) since previous sample. */ rate?: number | null; /** * Timestamp of the measurement. */ timestamp?: string | null; /** * Total active CPU time across all cores (i.e., non-idle) in milliseconds since start-up. */ totalMs?: string | null; } /** * A request to create a Cloud Dataflow job from a template. */ export interface Schema$CreateJobFromTemplateRequest { /** * The runtime environment for the job. */ environment?: Schema$RuntimeEnvironment; /** * Required. A Cloud Storage path to the template from which to create the job. Must be a valid Cloud Storage URL, beginning with `gs://`. */ gcsPath?: string | null; /** * Required. The job name to use for the created job. */ jobName?: string | null; /** * The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to which to direct the request. */ location?: string | null; /** * The runtime parameters to pass to the job. */ parameters?: { [key: string]: string; } | null; } /** * Identifies the location of a custom source. */ export interface Schema$CustomSourceLocation { /** * Whether this source is stateful. */ stateful?: boolean | null; } /** * Data disk assignment for a given VM instance. */ export interface Schema$DataDiskAssignment { /** * Mounted data disks. The order is important: a data disk's 0-based index in this list defines which persistent directory the disk is mounted to, for example the list of { "myproject-1014-104817-4c2-harness-0-disk-0" \}, { "myproject-1014-104817-4c2-harness-0-disk-1" \}. */ dataDisks?: string[] | null; /** * VM instance name the data disks are mounted to, for example "myproject-1014-104817-4c2-harness-0". */ vmInstance?: string | null; } /** * The gauge value of a metric. */ export interface Schema$DataflowGaugeValue { /** * The timestamp when the gauge was recorded. */ measuredTime?: string | null; /** * The value of the gauge. */ value?: string | null; } /** * Summary statistics for a population of values. HistogramValue contains a sequence of buckets and gives a count of values that fall into each bucket. Bucket boundaries are defined by a formula and bucket widths are either fixed or exponentially increasing. */ export interface Schema$DataflowHistogramValue { /** * Optional. The number of values in each bucket of the histogram, as described in `bucket_options`. `bucket_counts` should contain N values, where N is the number of buckets specified in `bucket_options`. 
If `bucket_counts` has fewer than N values, the remaining values are assumed to be 0. */ bucketCounts?: string[] | null; /** * Describes the bucket boundaries used in the histogram. */ bucketOptions?: Schema$BucketOptions; /** * Number of values recorded in this histogram. */ count?: string | null; /** * Statistics on the values recorded in the histogram that fall out of the bucket boundaries. */ outlierStats?: Schema$OutlierStats; } /** * Configuration options for sampling elements. */ export interface Schema$DataSamplingConfig { /** * List of given sampling behaviors to enable. For example, specifying behaviors = [ALWAYS_ON] samples in-flight elements but does not sample exceptions. Can be used to specify multiple behaviors like, behaviors = [ALWAYS_ON, EXCEPTIONS] for specifying periodic sampling and exception sampling. If DISABLED is in the list, then sampling will be disabled and ignore the other given behaviors. Ordering does not matter. */ behaviors?: string[] | null; } /** * Contains per-worker telemetry about the data sampling feature. */ export interface Schema$DataSamplingReport { /** * Optional. Delta of bytes written to file from previous report. */ bytesWrittenDelta?: string | null; /** * Optional. Delta of bytes sampled from previous report. */ elementsSampledBytes?: string | null; /** * Optional. Delta of number of elements sampled from previous report. */ elementsSampledCount?: string | null; /** * Optional. Delta of number of samples taken from user code exceptions from previous report. */ exceptionsSampledCount?: string | null; /** * Optional. Delta of number of PCollections sampled from previous report. */ pcollectionsSampledCount?: string | null; /** * Optional. Delta of errors counts from persisting the samples from previous report. */ persistenceErrorsCount?: string | null; /** * Optional. Delta of errors counts from retrieving, or translating the samples from previous report. */ translationErrorsCount?: string | null; } /** * Metadata for a Datastore connector used by the job. */ export interface Schema$DatastoreIODetails { /** * Namespace used in the connection. */ namespace?: string | null; /** * ProjectId accessed in the connection. */ projectId?: string | null; } /** * Describes any options that have an effect on the debugging of pipelines. */ export interface Schema$DebugOptions { /** * Configuration options for sampling elements from a running pipeline. */ dataSampling?: Schema$DataSamplingConfig; /** * Optional. When true, enables the logging of the literal hot key to the user's Cloud Logging. */ enableHotKeyLogging?: boolean | null; } /** * Response from deleting a snapshot. */ export interface Schema$DeleteSnapshotResponse { } /** * Specification of one of the bundles produced as a result of splitting a Source (e.g. when executing a SourceSplitRequest, or when splitting an active task using WorkItemStatus.dynamic_source_split), relative to the source being split. */ export interface Schema$DerivedSource { /** * What source to base the produced source on (if any). */ derivationMode?: string | null; /** * Specification of the source. */ source?: Schema$Source; } /** * Describes the data disk used by a workflow job. */ export interface Schema$Disk { /** * Disk storage type, as defined by Google Compute Engine. This must be a disk type appropriate to the project and zone in which the workers will run. If unknown or unspecified, the service will attempt to choose a reasonable default. 
For example, the standard persistent disk type is a resource name typically ending in "pd-standard". If SSD persistent disks are available, the resource name typically ends with "pd-ssd". The actual valid values are defined by the Google Compute Engine API, not by the Cloud Dataflow API; consult the Google Compute Engine documentation for more information about determining the set of available disk types for a particular project and zone. Google Compute Engine Disk types are local to a particular project in a particular zone, and so the resource name will typically look something like this: compute.googleapis.com/projects/project-id/zones/zone/diskTypes/pd-standard */ diskType?: string | null; /** * Directory in a VM where disk is mounted. */ mountPoint?: string | null; /** * Size of disk in GB. If zero or unspecified, the service will attempt to choose a reasonable default. */ sizeGb?: number | null; } /** * Data provided with a pipeline or transform to provide descriptive info. */ export interface Schema$DisplayData { /** * Contains value if the data is of a boolean type. */ boolValue?: boolean | null; /** * Contains value if the data is of duration type. */ durationValue?: string | null; /** * Contains value if the data is of float type. */ floatValue?: number | null; /** * Contains value if the data is of int64 type. */ int64Value?: string | null; /** * Contains value if the data is of java class type. */ javaClassValue?: string | null; /** * The key identifying the display data. This is intended to be used as a label for the display data when viewed in a dax monitoring system. */ key?: string | null; /** * An optional label to display in a dax UI for the element. */ label?: string | null; /** * The namespace for the key. This is usually a class name or programming language namespace (i.e. python module) which defines the display data. This allows a dax monitoring system to specially handle the data and perform custom rendering. */ namespace?: string | null; /** * A possible additional shorter value to display. For example a java_class_name_value of com.mypackage.MyDoFn will be stored with MyDoFn as the short_str_value and com.mypackage.MyDoFn as the java_class_name value. short_str_value can be displayed and java_class_name_value will be displayed as a tooltip. */ shortStrValue?: string | null; /** * Contains value if the data is of string type. */ strValue?: string | null; /** * Contains value if the data is of timestamp type. */ timestampValue?: string | null; /** * An optional full URL. */ url?: string | null; } /** * A metric value representing a distribution. */ export interface Schema$DistributionUpdate { /** * The count of the number of elements present in the distribution. */ count?: Schema$SplitInt64; /** * (Optional) Histogram of value counts for the distribution. */ histogram?: Schema$Histogram; /** * The maximum value present in the distribution. */ max?: Schema$SplitInt64; /** * The minimum value present in the distribution. */ min?: Schema$SplitInt64; /** * Use an int64 since we'd prefer the added precision. If overflow is a common problem we can detect it and use an additional int64 or a double. */ sum?: Schema$SplitInt64; /** * Use a double since the sum of squares is likely to overflow int64. */ sumOfSquares?: number | null; } /** * When a task splits using WorkItemStatus.dynamic_source_split, this message describes the two parts of the split relative to the description of the current task's input. 
*/ export interface Schema$DynamicSourceSplit { /** * Primary part (continued to be processed by worker). Specified relative to the previously-current source. Becomes current. */ primary?: Schema$DerivedSource; /** * Residual part (returned to the pool of work). Specified relative to the previously-current source. */ residual?: Schema$DerivedSource; } /** * Describes the environment in which a Dataflow Job runs. */ export interface Schema$Environment { /** * The type of cluster manager API to use. If unknown or unspecified, the service will attempt to choose a reasonable default. This should be in the form of the API service name, e.g. "compute.googleapis.com". */ clusterManagerApiService?: string | null; /** * Optional. The dataset for the current project where various workflow related tables are stored. The supported resource type is: Google BigQuery: bigquery.googleapis.com/{dataset\} */ dataset?: string | null; /** * Optional. Any debugging options to be supplied to the job. */ debugOptions?: Schema$DebugOptions; /** * The list of experiments to enable. This field should be used for SDK related experiments and not for service related experiments. The proper field for service related experiments is service_options. */ experiments?: string[] | null; /** * Optional. Which Flexible Resource Scheduling mode to run in. */ flexResourceSchedulingGoal?: string | null; /** * Experimental settings. */ internalExperiments?: { [key: string]: any; } | null; /** * The Cloud Dataflow SDK pipeline options specified by the user. These options are passed through the service and are used to recreate the SDK pipeline options on the worker in a language agnostic and platform independent way. */ sdkPipelineOptions?: { [key: string]: any; } | null; /** * Optional. Identity to run virtual machines as. Defaults to the default account. */ serviceAccountEmail?: string | null; /** * Optional. If set, contains the Cloud KMS key identifier used to encrypt data at rest, AKA a Customer Managed Encryption Key (CMEK). Format: projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY */ serviceKmsKeyName?: string | null; /** * Optional. The list of service options to enable. This field should be used for service related experiments only. These experiments, when graduating to GA, should be replaced by dedicated fields or become default (i.e. always on). */ serviceOptions?: string[] | null; /** * Output only. The shuffle mode used for the job. */ shuffleMode?: string | null; /** * Optional. Specifies the Streaming Engine message processing guarantees. Reduces cost and latency but might result in duplicate messages committed to storage. Designed to run simple mapping streaming ETL jobs at the lowest cost. For example, Change Data Capture (CDC) to BigQuery is a canonical use case. For more information, see [Set the pipeline streaming mode](https://cloud.google.com/dataflow/docs/guides/streaming-modes). */ streamingMode?: string | null; /** * The prefix of the resources the system should use for temporary storage. The system will append the suffix "/temp-{JOBNAME\} to this resource prefix, where {JOBNAME\} is the value of the job_name field. The resulting bucket and object prefix is used as the prefix of the resources used to store temporary data needed during the job execution. NOTE: This will override the value in taskrunner_settings. 
The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket\}/{object\} bucket.storage.googleapis.com/{object\} */ tempStoragePrefix?: string | null; /** * Optional. True when any worker pool that uses public IPs is present. */ usePublicIps?: boolean | null; /** * Optional. A description of the process that generated the request. */ userAgent?: { [key: string]: any; } | null; /** * Output only. Whether the job uses the Streaming Engine resource-based billing model. */ useStreamingEngineResourceBasedBilling?: boolean | null; /** * A structure describing which components and their versions of the service are required in order to run the job. */ version?: { [key: string]: any; } | null; /** * The worker pools. At least one "harness" worker pool must be specified in order for the job to have workers. */ workerPools?: Schema$WorkerPool[]; /** * Optional. The Compute Engine region (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1". Mutually exclusive with worker_zone. If neither worker_region nor worker_zone is specified, default to the control plane's region. */ workerRegion?: string | null; /** * Optional. The Compute Engine zone (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1-a". Mutually exclusive with worker_region. If neither worker_region nor worker_zone is specified, a zone in the control plane's region is chosen based on available capacity. */ workerZone?: string | null; } /** * A message describing the state of a particular execution stage. */ export interface Schema$ExecutionStageState { /** * The time at which the stage transitioned to this state. */ currentStateTime?: string | null; /** * The name of the execution stage. */ executionStageName?: string | null; /** * Executions stage states allow the same set of values as JobState. */ executionStageState?: string | null; } /** * Description of the composing transforms, names/ids, and input/outputs of a stage of execution. Some composing transforms and sources may have been generated by the Dataflow service during execution planning. */ export interface Schema$ExecutionStageSummary { /** * Collections produced and consumed by component transforms of this stage. */ componentSource?: Schema$ComponentSource[]; /** * Transforms that comprise this execution stage. */ componentTransform?: Schema$ComponentTransform[]; /** * Dataflow service generated id for this stage. */ id?: string | null; /** * Input sources for this stage. */ inputSource?: Schema$StageSource[]; /** * Type of transform this stage is executing. */ kind?: string | null; /** * Dataflow service generated name for this stage. */ name?: string | null; /** * Output sources for this stage. */ outputSource?: Schema$StageSource[]; /** * Other stages that must complete before this stage can run. */ prerequisiteStage?: string[] | null; } /** * Indicates which [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) failed to respond to a request for data. */ export interface Schema$FailedLocation { /** * The name of the [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that failed to respond. */ name?: string | null; } /** * Metadata for a File connector used by the job. */ export interface Schema$FileIODetails { /** * File Pattern used to access files by the connector. 
filePattern?: string | null; } /** * An instruction that copies its inputs (zero or more) to its (single) output. */ export interface Schema$FlattenInstruction { /** * Describes the inputs to the flatten instruction. */ inputs?: Schema$InstructionInput[]; } /** * The environment values to be set at runtime for flex template. */ export interface Schema$FlexTemplateRuntimeEnvironment { /** * Additional experiment flags for the job. */ additionalExperiments?: string[] | null; /** * Optional. Additional pipeline option flags for the job. */ additionalPipelineOptions?: string[] | null; /** * Additional user labels to be specified for the job. Keys and values must follow the restrictions specified in the [labeling restrictions](https://cloud.google.com/compute/docs/labeling-resources#restrictions) page. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1kg", "count": "3" \}. */ additionalUserLabels?: { [key: string]: string; } | null; /** * The algorithm to use for autoscaling. */ autoscalingAlgorithm?: string | null; /** * Worker disk size, in gigabytes. */ diskSizeGb?: number | null; /** * If true, when processing time is spent almost entirely on garbage collection (GC), saves a heap dump before ending the thread or process. If false, ends the thread or process without saving a heap dump. Does not save a heap dump when the Java Virtual Machine (JVM) has an out of memory error during processing. The location of the heap file is either echoed back to the user, or the user is given the opportunity to download the heap file. */ dumpHeapOnOom?: boolean | null; /** * If true, serial port logging will be enabled for the launcher VM. */ enableLauncherVmSerialPortLogging?: boolean | null; /** * Whether to enable Streaming Engine for the job. */ enableStreamingEngine?: boolean | null; /** * Set FlexRS goal for the job. https://cloud.google.com/dataflow/docs/guides/flexrs */ flexrsGoal?: string | null; /** * Configuration for VM IPs. */ ipConfiguration?: string | null; /** * Name for the Cloud KMS key for the job. Key format is: projects//locations//keyRings//cryptoKeys/ */ kmsKeyName?: string | null; /** * The machine type to use for launching the job. The default is n1-standard-1. */ launcherMachineType?: string | null; /** * The machine type to use for the job. Defaults to the value from the template if not specified. */ machineType?: string | null; /** * The maximum number of Google Compute Engine instances to be made available to your pipeline during execution, from 1 to 1000. */ maxWorkers?: number | null; /** * Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default". */ network?: string | null; /** * The initial number of Google Compute Engine instances for the job. */ numWorkers?: number | null; /** * Cloud Storage bucket (directory) to upload heap dumps to. Enabling this field implies that `dump_heap_on_oom` is set to true. */ saveHeapDumpsToGcsPath?: string | null; /** * Docker registry location of container image to use for the worker harness. Default is the container for the version of the SDK. Note this field is only valid for portable pipelines. */ sdkContainerImage?: string | null; /** * The email address of the service account to run the job as. */ serviceAccountEmail?: string | null; /** * The Cloud Storage path for staging local files. Must be a valid Cloud Storage URL, beginning with `gs://`. */ stagingLocation?: string | null; /** * Optional. 
Specifies the Streaming Engine message processing guarantees. Reduces cost and latency but might result in duplicate messages committed to storage. Designed to run simple mapping streaming ETL jobs at the lowest cost. For example, Change Data Capture (CDC) to BigQuery is a canonical use case. For more information, see [Set the pipeline streaming mode](https://cloud.google.com/dataflow/docs/guides/streaming-modes). */ streamingMode?: string | null; /** * Subnetwork to which VMs will be assigned, if desired. You can specify a subnetwork using either a complete URL or an abbreviated path. Expected to be of the form "https://www.googleapis.com/compute/v1/projects/HOST_PROJECT_ID/regions/REGION/subnetworks/SUBNETWORK" or "regions/REGION/subnetworks/SUBNETWORK". If the subnetwork is located in a Shared VPC network, you must use the complete URL. */ subnetwork?: string | null; /** * The Cloud Storage path to use for temporary files. Must be a valid Cloud Storage URL, beginning with `gs://`. */ tempLocation?: string | null; /** * The Compute Engine region (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1". Mutually exclusive with worker_zone. If neither worker_region nor worker_zone is specified, default to the control plane's region. */ workerRegion?: string | null; /** * The Compute Engine zone (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1-a". Mutually exclusive with worker_region. If neither worker_region nor worker_zone is specified, a zone in the control plane's region is chosen based on available capacity. If both `worker_zone` and `zone` are set, `worker_zone` takes precedence. */ workerZone?: string | null; /** * The Compute Engine [availability zone](https://cloud.google.com/compute/docs/regions-zones/regions-zones) for launching worker instances to run your pipeline. In the future, worker_zone will take precedence. */ zone?: string | null; } /** * A metric value representing a list of floating point numbers. */ export interface Schema$FloatingPointList { /** * Elements of the list. */ elements?: number[] | null; } /** * A representation of a floating point mean metric contribution. */ export interface Schema$FloatingPointMean { /** * The number of values being aggregated. */ count?: Schema$SplitInt64; /** * The sum of all values being aggregated. */ sum?: number | null; } /** * Request to get updated debug configuration for component. */ export interface Schema$GetDebugConfigRequest { /** * The internal component id for which debug configuration is requested. */ componentId?: string | null; /** * The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains the job specified by job_id. */ location?: string | null; /** * The worker id, i.e., VM hostname. */ workerId?: string | null; } /** * Response to a get debug configuration request. */ export interface Schema$GetDebugConfigResponse { /** * The encoded debug configuration for the requested component. */ config?: string | null; } /** * The response to a GetTemplate request. */ export interface Schema$GetTemplateResponse { /** * The template metadata describing the template name, available parameters, etc. */ metadata?: Schema$TemplateMetadata; /** * Describes the runtime metadata with SDKInfo and available parameters. */ runtimeMetadata?: Schema$RuntimeMetadata; /** * The status of the get template request. 
Any problems with the request will be indicated in the error_details. */ status?: Schema$Status; /** * Template Type. */ templateType?: string | null; } /** * Request to get worker stacktraces from debug capture. */ export interface Schema$GetWorkerStacktracesRequest { /** * The worker for which to get stacktraces. The returned stacktraces will be for the SDK harness running on this worker. */ workerId?: string | null; } /** * Response to get worker stacktraces from debug capture. */ export interface Schema$GetWorkerStacktracesResponse { /** * Repeated as unified worker may have multiple SDK processes. */ sdks?: Schema$Sdk[]; } /** * Information about the GPU usage on the worker. */ export interface Schema$GPUUsage { /** * Required. Timestamp of the measurement. */ timestamp?: string | null; /** * Required. Utilization info about the GPU. */ utilization?: Schema$GPUUtilization; } /** * Utilization details about the GPU. */ export interface Schema$GPUUtilization { /** * Required. GPU utilization rate of any kernel over the last sample period in the range of [0, 1]. */ rate?: number | null; } /** * Histogram of value counts for a distribution. Buckets have an inclusive lower bound and exclusive upper bound and use "1,2,5 bucketing": The first bucket range is from [0,1) and all subsequent bucket boundaries are powers of ten multiplied by 1, 2, or 5. Thus, bucket boundaries are 0, 1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, ... Negative values are not supported. */ export interface Schema$Histogram { /** * Counts of values in each bucket. For efficiency, prefix and trailing buckets with count = 0 are elided. Buckets can store the full range of values of an unsigned long, with ULLONG_MAX falling into the 59th bucket with range [1e19, 2e19). */ bucketCounts?: string[] | null; /** * Starting index of first stored bucket. The non-inclusive upper-bound of the ith bucket is given by: pow(10,(i-first_bucket_offset)/3) * (1,2,5)[(i-first_bucket_offset)%3] */ firstBucketOffset?: number | null; } /** * Information useful for debugging a hot key detection. */ export interface Schema$HotKeyDebuggingInfo { /** * Debugging information for each detected hot key. Keyed by a hash of the key. */ detectedHotKeys?: { [key: string]: Schema$HotKeyInfo; } | nu
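/**
 * A minimal usage sketch for these declarations. It assumes Application
 * Default Credentials are available and uses the
 * `projects.locations.templates.create` resource method, which the full file
 * defines for Schema$CreateJobFromTemplateRequest but which is not shown in
 * this excerpt; the project, region, and `gs://` paths below are placeholders.
 *
 * ```js
 * const {google} = require('googleapis');
 *
 * async function launchTemplateJob() {
 *   const auth = new google.auth.GoogleAuth({
 *     scopes: ['https://www.googleapis.com/auth/cloud-platform'],
 *   });
 *   const dataflow = google.dataflow({version: 'v1b3', auth});
 *   // requestBody follows Schema$CreateJobFromTemplateRequest.
 *   const res = await dataflow.projects.locations.templates.create({
 *     projectId: 'my-project',   // placeholder project
 *     location: 'us-central1',   // placeholder regional endpoint
 *     requestBody: {
 *       jobName: 'example-template-job',
 *       gcsPath: 'gs://my-bucket/templates/my-template', // placeholder template path
 *       parameters: {inputFile: 'gs://my-bucket/input.txt'},
 *       environment: {tempLocation: 'gs://my-bucket/temp'},
 *     },
 *   });
 *   console.log(res.data); // the created job
 * }
 * ```
 */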