UNPKG

googleapis

Version: (not captured in this extract)
776 lines · 104 kB (full file; this extract is truncated)
/** * Copyright 2019 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import { OAuth2Client, JWT, Compute, UserRefreshClient } from 'google-auth-library'; import { GoogleConfigurable, MethodOptions, GlobalOptions, BodyResponseCallback, APIRequestContext } from 'googleapis-common'; import { GaxiosPromise } from 'gaxios'; export declare namespace remotebuildexecution_v1 { interface Options extends GlobalOptions { version: 'v1'; } interface StandardParameters { /** * V1 error format. */ '$.xgafv'?: string; /** * OAuth access token. */ access_token?: string; /** * Data format for response. */ alt?: string; /** * JSONP */ callback?: string; /** * Selector specifying which fields to include in a partial response. */ fields?: string; /** * API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */ key?: string; /** * OAuth 2.0 token for the current user. */ oauth_token?: string; /** * Returns response with indentations and line breaks. */ prettyPrint?: boolean; /** * Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */ quotaUser?: string; /** * Legacy upload protocol for media (e.g. "media", "multipart"). */ uploadType?: string; /** * Upload protocol for media (e.g. "raw", "multipart"). 
*/ upload_protocol?: string; } /** * Remote Build Execution API * * Supplies a Remote Execution API service for tools such as bazel. * * @example * const {google} = require('googleapis'); * const remotebuildexecution = google.remotebuildexecution('v1'); * * @namespace remotebuildexecution * @type {Function} * @version v1 * @variation v1 * @param {object=} options Options for Remotebuildexecution */ class Remotebuildexecution { context: APIRequestContext; media: Resource$Media; operations: Resource$Operations; projects: Resource$Projects; v1: Resource$V1; constructor(options: GlobalOptions, google?: GoogleConfigurable); } /** * An `Action` captures all the information about an execution which is required to reproduce it. `Action`s are the core component of the [Execution] service. A single `Action` represents a repeatable action that can be performed by the execution service. `Action`s can be succinctly identified by the digest of their wire format encoding and, once an `Action` has been executed, will be cached in the action cache. Future requests can then use the cached result rather than needing to run afresh. When a server completes execution of an Action, it MAY choose to cache the result in the ActionCache unless `do_not_cache` is `true`. Clients SHOULD expect the server to do so. By default, future calls to Execute the same `Action` will also serve their results from the cache. Clients must take care to understand the caching behaviour. Ideally, all `Action`s will be reproducible so that serving a result from cache is always desirable and correct. */ interface Schema$BuildBazelRemoteExecutionV2Action { /** * The digest of the Command to run, which MUST be present in the ContentAddressableStorage. */ commandDigest?: Schema$BuildBazelRemoteExecutionV2Digest; /** * If true, then the `Action`'s result cannot be cached, and in-flight requests for the same `Action` may not be merged. 
*/ doNotCache?: boolean; /** * The digest of the root Directory for the input files. The files in the directory tree are available in the correct location on the build machine before the command is executed. The root directory, as well as every subdirectory and content blob referred to, MUST be in the ContentAddressableStorage. */ inputRootDigest?: Schema$BuildBazelRemoteExecutionV2Digest; /** * A timeout after which the execution should be killed. If the timeout is absent, then the client is specifying that the execution should continue as long as the server will let it. The server SHOULD impose a timeout if the client does not specify one, however, if the client does specify a timeout that is longer than the server's maximum timeout, the server MUST reject the request. The timeout is a part of the Action message, and therefore two `Actions` with different timeouts are different, even if they are otherwise identical. This is because, if they were not, running an `Action` with a lower timeout than is required might result in a cache hit from an execution run with a longer timeout, hiding the fact that the timeout is too short. By encoding it directly in the `Action`, a lower timeout will result in a cache miss and the execution timeout will fail immediately, rather than whenever the cache entry gets evicted. */ timeout?: string; } /** * An ActionResult represents the result of an Action being run. */ interface Schema$BuildBazelRemoteExecutionV2ActionResult { /** * The details of the execution that originally produced this result. */ executionMetadata?: Schema$BuildBazelRemoteExecutionV2ExecutedActionMetadata; /** * The exit code of the command. */ exitCode?: number; /** * The output directories of the action. 
For each output directory requested in the `output_directories` field of the Action, if the corresponding directory existed after the action completed, a single entry will be present in the output list, which will contain the digest of a Tree message containing the directory tree, and the path equal exactly to the corresponding Action output_directories member. As an example, suppose the Action had an output directory `a/b/dir` and the execution produced the following contents in `a/b/dir`: a file named `bar` and a directory named `foo` with an executable file named `baz`. Then, output_directory will contain (hashes shortened for readability): ```json // OutputDirectory proto: { path: "a/b/dir" tree_digest: { hash: "4a73bc9d03...", size: 55 } } // Tree proto with hash "4a73bc9d03..." and size 55: { root: { files: [ { name: "bar", digest: { hash: "4a73bc9d03...", size: 65534 } } ], directories: [ { name: "foo", digest: { hash: "4cf2eda940...", size: 43 } } ] } children : { // (Directory proto with hash "4cf2eda940..." and size 43) files: [ { name: "baz", digest: { hash: "b2c941073e...", size: 1294, }, is_executable: true } ] } } ``` If an output of the same name was found, but was not a directory, the server will return a FAILED_PRECONDITION. */ outputDirectories?: Schema$BuildBazelRemoteExecutionV2OutputDirectory[]; /** * The output directories of the action that are symbolic links to other directories. Those may be links to other output directories, or input directories, or even absolute paths outside of the working directory, if the server supports SymlinkAbsolutePathStrategy.ALLOWED. For each output directory requested in the `output_directories` field of the Action, if the directory existed after the action completed, a single entry will be present either in this field, or in the `output_directories` field, if the directory was not a symbolic link. 
If an output of the same name was found, but was a symbolic link to a file instead of a directory, the server will return a FAILED_PRECONDITION. If the action does not produce the requested output, then that output will be omitted from the list. The server is free to arrange the output list as desired; clients MUST NOT assume that the output list is sorted. */ outputDirectorySymlinks?: Schema$BuildBazelRemoteExecutionV2OutputSymlink[]; /** * The output files of the action. For each output file requested in the `output_files` field of the Action, if the corresponding file existed after the action completed, a single entry will be present either in this field, or the `output_file_symlinks` field if the file was a symbolic link to another file. If an output of the same name was found, but was a directory rather than a regular file, the server will return a FAILED_PRECONDITION. If the action does not produce the requested output, then that output will be omitted from the list. The server is free to arrange the output list as desired; clients MUST NOT assume that the output list is sorted. */ outputFiles?: Schema$BuildBazelRemoteExecutionV2OutputFile[]; /** * The output files of the action that are symbolic links to other files. Those may be links to other output files, or input files, or even absolute paths outside of the working directory, if the server supports SymlinkAbsolutePathStrategy.ALLOWED. For each output file requested in the `output_files` field of the Action, if the corresponding file existed after the action completed, a single entry will be present either in this field, or in the `output_files` field, if the file was not a symbolic link. If an output symbolic link of the same name was found, but its target type was not a regular file, the server will return a FAILED_PRECONDITION. If the action does not produce the requested output, then that output will be omitted from the list. 
The server is free to arrange the output list as desired; clients MUST NOT assume that the output list is sorted. */ outputFileSymlinks?: Schema$BuildBazelRemoteExecutionV2OutputSymlink[]; /** * The digest for a blob containing the standard error of the action, which can be retrieved from the ContentAddressableStorage. */ stderrDigest?: Schema$BuildBazelRemoteExecutionV2Digest; /** * The standard error buffer of the action. The server SHOULD NOT inline stderr unless requested by the client in the GetActionResultRequest message. The server MAY omit inlining, even if requested, and MUST do so if inlining would cause the response to exceed message size limits. */ stderrRaw?: string; /** * The digest for a blob containing the standard output of the action, which can be retrieved from the ContentAddressableStorage. */ stdoutDigest?: Schema$BuildBazelRemoteExecutionV2Digest; /** * The standard output buffer of the action. The server SHOULD NOT inline stdout unless requested by the client in the GetActionResultRequest message. The server MAY omit inlining, even if requested, and MUST do so if inlining would cause the response to exceed message size limits. */ stdoutRaw?: string; } /** * A `Command` is the actual command executed by a worker running an Action and specifications of its environment. Except as otherwise required, the environment (such as which system libraries or binaries are available, and what filesystems are mounted where) is defined by and specific to the implementation of the remote execution API. */ interface Schema$BuildBazelRemoteExecutionV2Command { /** * The arguments to the command. The first argument must be the path to the executable, which must be either a relative path, in which case it is evaluated with respect to the input root, or an absolute path. */ arguments?: string[]; /** * The environment variables to set when running the program. 
The worker may provide its own default environment variables; these defaults can be overridden using this field. Additional variables can also be specified. In order to ensure that equivalent Commands always hash to the same value, the environment variables MUST be lexicographically sorted by name. Sorting of strings is done by code point, equivalently, by the UTF-8 bytes. */ environmentVariables?: Schema$BuildBazelRemoteExecutionV2CommandEnvironmentVariable[]; /** * A list of the output directories that the client expects to retrieve from the action. Only the listed directories will be returned (an entire directory structure will be returned as a Tree message digest, see OutputDirectory), as well as files listed in `output_files`. Other files or directories that may be created during command execution are discarded. The paths are relative to the working directory of the action execution. The paths are specified using a single forward slash (`/`) as a path separator, even if the execution platform natively uses a different separator. The path MUST NOT include a trailing slash, nor a leading slash, being a relative path. The special value of empty string is allowed, although not recommended, and can be used to capture the entire working directory tree, including inputs. In order to ensure consistent hashing of the same Action, the output paths MUST be sorted lexicographically by code point (or, equivalently, by UTF-8 bytes). An output directory cannot be duplicated or have the same path as any of the listed output files. An output directory is allowed to be a parent of another output directory. Directories leading up to the output directories (but not the output directories themselves) are created by the worker prior to execution, even if they are not explicitly part of the input root. */ outputDirectories?: string[]; /** * A list of the output files that the client expects to retrieve from the action. 
Only the listed files, as well as directories listed in `output_directories`, will be returned to the client as output. Other files or directories that may be created during command execution are discarded. The paths are relative to the working directory of the action execution. The paths are specified using a single forward slash (`/`) as a path separator, even if the execution platform natively uses a different separator. The path MUST NOT include a trailing slash, nor a leading slash, being a relative path. In order to ensure consistent hashing of the same Action, the output paths MUST be sorted lexicographically by code point (or, equivalently, by UTF-8 bytes). An output file cannot be duplicated, be a parent of another output file, or have the same path as any of the listed output directories. Directories leading up to the output files are created by the worker prior to execution, even if they are not explicitly part of the input root. */ outputFiles?: string[]; /** * The platform requirements for the execution environment. The server MAY choose to execute the action on any worker satisfying the requirements, so the client SHOULD ensure that running the action on any such worker will have the same result. A detailed lexicon for this can be found in the accompanying platform.md. */ platform?: Schema$BuildBazelRemoteExecutionV2Platform; /** * The working directory, relative to the input root, for the command to run in. It must be a directory which exists in the input tree. If it is left empty, then the action is run in the input root. */ workingDirectory?: string; } /** * An `EnvironmentVariable` is one variable to set in the running program's environment. */ interface Schema$BuildBazelRemoteExecutionV2CommandEnvironmentVariable { /** * The variable name. */ name?: string; /** * The variable value. */ value?: string; } /** * A content digest. A digest for a given blob consists of the size of the blob and its hash. 
The hash algorithm to use is defined by the server, but servers SHOULD use SHA-256. The size is considered to be an integral part of the digest and cannot be separated. That is, even if the `hash` field is correctly specified but `size_bytes` is not, the server MUST reject the request. The reason for including the size in the digest is as follows: in a great many cases, the server needs to know the size of the blob it is about to work with prior to starting an operation with it, such as flattening Merkle tree structures or streaming it to a worker. Technically, the server could implement a separate metadata store, but this results in a significantly more complicated implementation as opposed to having the client specify the size up-front (or storing the size along with the digest in every message where digests are embedded). This does mean that the API leaks some implementation details of (what we consider to be) a reasonable server implementation, but we consider this to be a worthwhile tradeoff. When a `Digest` is used to refer to a proto message, it always refers to the message in binary encoded form. To ensure consistent hashing, clients and servers MUST ensure that they serialize messages according to the following rules, even if there are alternate valid encodings for the same message: * Fields are serialized in tag order. * There are no unknown fields. * There are no duplicate fields. * Fields are serialized according to the default semantics for their type. Most protocol buffer implementations will always follow these rules when serializing, but care should be taken to avoid shortcuts. For instance, concatenating two messages to merge them may produce duplicate fields. */ interface Schema$BuildBazelRemoteExecutionV2Digest { /** * The hash. In the case of SHA-256, it will always be a lowercase hex string exactly 64 characters long. */ hash?: string; /** * The size of the blob, in bytes. 
*/ sizeBytes?: string; } /** * A `Directory` represents a directory node in a file tree, containing zero or more children FileNodes, DirectoryNodes and SymlinkNodes. Each `Node` contains its name in the directory, either the digest of its content (either a file blob or a `Directory` proto) or a symlink target, as well as possibly some metadata about the file or directory. In order to ensure that two equivalent directory trees hash to the same value, the following restrictions MUST be obeyed when constructing a a `Directory`: * Every child in the directory must have a path of exactly one segment. Multiple levels of directory hierarchy may not be collapsed. * Each child in the directory must have a unique path segment (file name). Note that while the API itself is case-sensitive, the environment where the Action is executed may or may not be case-sensitive. That is, it is legal to call the API with a Directory that has both "Foo" and "foo" as children, but the Action may be rejected by the remote system upon execution. * The files, directories and symlinks in the directory must each be sorted in lexicographical order by path. The path strings must be sorted by code point, equivalently, by UTF-8 bytes. A `Directory` that obeys the restrictions is said to be in canonical form. As an example, the following could be used for a file named `bar` and a directory named `foo` with an executable file named `baz` (hashes shortened for readability): ```json // (Directory proto) { files: [ { name: "bar", digest: { hash: "4a73bc9d03...", size: 65534 } } ], directories: [ { name: "foo", digest: { hash: "4cf2eda940...", size: 43 } } ] } // (Directory proto with hash "4cf2eda940..." and size 43) { files: [ { name: "baz", digest: { hash: "b2c941073e...", size: 1294, }, is_executable: true } ] } ``` */ interface Schema$BuildBazelRemoteExecutionV2Directory { /** * The subdirectories in the directory. 
*/ directories?: Schema$BuildBazelRemoteExecutionV2DirectoryNode[]; /** * The files in the directory. */ files?: Schema$BuildBazelRemoteExecutionV2FileNode[]; /** * The symlinks in the directory. */ symlinks?: Schema$BuildBazelRemoteExecutionV2SymlinkNode[]; } /** * A `DirectoryNode` represents a child of a Directory which is itself a `Directory` and its associated metadata. */ interface Schema$BuildBazelRemoteExecutionV2DirectoryNode { /** * The digest of the Directory object represented. See Digest for information about how to take the digest of a proto message. */ digest?: Schema$BuildBazelRemoteExecutionV2Digest; /** * The name of the directory. */ name?: string; } /** * ExecutedActionMetadata contains details about a completed execution. */ interface Schema$BuildBazelRemoteExecutionV2ExecutedActionMetadata { /** * When the worker completed executing the action command. */ executionCompletedTimestamp?: string; /** * When the worker started executing the action command. */ executionStartTimestamp?: string; /** * When the worker finished fetching action inputs. */ inputFetchCompletedTimestamp?: string; /** * When the worker started fetching action inputs. */ inputFetchStartTimestamp?: string; /** * When the worker finished uploading action outputs. */ outputUploadCompletedTimestamp?: string; /** * When the worker started uploading action outputs. */ outputUploadStartTimestamp?: string; /** * When was the action added to the queue. */ queuedTimestamp?: string; /** * The name of the worker which ran the execution. */ worker?: string; /** * When the worker completed the action, including all stages. */ workerCompletedTimestamp?: string; /** * When the worker received the action. */ workerStartTimestamp?: string; } /** * Metadata about an ongoing execution, which will be contained in the metadata field of the Operation. */ interface Schema$BuildBazelRemoteExecutionV2ExecuteOperationMetadata { /** * The digest of the Action being executed. 
*/ actionDigest?: Schema$BuildBazelRemoteExecutionV2Digest; /** * The current stage of execution. */ stage?: string; /** * If set, the client can use this name with ByteStream.Read to stream the standard error. */ stderrStreamName?: string; /** * If set, the client can use this name with ByteStream.Read to stream the standard output. */ stdoutStreamName?: string; } /** * The response message for Execution.Execute, which will be contained in the response field of the Operation. */ interface Schema$BuildBazelRemoteExecutionV2ExecuteResponse { /** * True if the result was served from cache, false if it was executed. */ cachedResult?: boolean; /** * Freeform informational message with details on the execution of the action that may be displayed to the user upon failure or when requested explicitly. */ message?: string; /** * The result of the action. */ result?: Schema$BuildBazelRemoteExecutionV2ActionResult; /** * An optional list of additional log outputs the server wishes to provide. A server can use this to return execution-specific logs however it wishes. This is intended primarily to make it easier for users to debug issues that may be outside of the actual job execution, such as by identifying the worker executing the action or by providing logs from the worker's setup phase. The keys SHOULD be human readable so that a client can display them to a user. */ serverLogs?: { [key: string]: Schema$BuildBazelRemoteExecutionV2LogFile; }; /** * If the status has a code other than `OK`, it indicates that the action did not finish execution. For example, if the operation times out during execution, the status will have a `DEADLINE_EXCEEDED` code. Servers MUST use this field for errors in execution, rather than the error field on the `Operation` object. If the status code is other than `OK`, then the result MUST NOT be cached. 
For an error status, the `result` field is optional; the server may populate the output-, stdout-, and stderr-related fields if it has any information available, such as the stdout and stderr of a timed-out action. */ status?: Schema$GoogleRpcStatus; } /** * A `FileNode` represents a single file and associated metadata. */ interface Schema$BuildBazelRemoteExecutionV2FileNode { /** * The digest of the file's content. */ digest?: Schema$BuildBazelRemoteExecutionV2Digest; /** * True if file is executable, false otherwise. */ isExecutable?: boolean; /** * The name of the file. */ name?: string; } /** * A `LogFile` is a log stored in the CAS. */ interface Schema$BuildBazelRemoteExecutionV2LogFile { /** * The digest of the log contents. */ digest?: Schema$BuildBazelRemoteExecutionV2Digest; /** * This is a hint as to the purpose of the log, and is set to true if the log is human-readable text that can be usefully displayed to a user, and false otherwise. For instance, if a command-line client wishes to print the server logs to the terminal for a failed action, this allows it to avoid displaying a binary file. */ humanReadable?: boolean; } /** * An `OutputDirectory` is the output in an `ActionResult` corresponding to a directory's full contents rather than a single file. */ interface Schema$BuildBazelRemoteExecutionV2OutputDirectory { /** * The full path of the directory relative to the working directory. The path separator is a forward slash `/`. Since this is a relative path, it MUST NOT begin with a leading forward slash. The empty string value is allowed, and it denotes the entire working directory. */ path?: string; /** * The digest of the encoded Tree proto containing the directory's contents. */ treeDigest?: Schema$BuildBazelRemoteExecutionV2Digest; } /** * An `OutputFile` is similar to a FileNode, but it is used as an output in an `ActionResult`. It allows a full file path rather than only a name. 
*/ interface Schema$BuildBazelRemoteExecutionV2OutputFile { /** * The contents of the file if inlining was requested. The server SHOULD NOT inline file contents unless requested by the client in the GetActionResultRequest message. The server MAY omit inlining, even if requested, and MUST do so if inlining would cause the response to exceed message size limits. */ contents?: string; /** * The digest of the file's content. */ digest?: Schema$BuildBazelRemoteExecutionV2Digest; /** * True if file is executable, false otherwise. */ isExecutable?: boolean; /** * The full path of the file relative to the working directory, including the filename. The path separator is a forward slash `/`. Since this is a relative path, it MUST NOT begin with a leading forward slash. */ path?: string; } /** * An `OutputSymlink` is similar to a Symlink, but it is used as an output in an `ActionResult`. `OutputSymlink` is binary-compatible with `SymlinkNode`. */ interface Schema$BuildBazelRemoteExecutionV2OutputSymlink { /** * The full path of the symlink relative to the working directory, including the filename. The path separator is a forward slash `/`. Since this is a relative path, it MUST NOT begin with a leading forward slash. */ path?: string; /** * The target path of the symlink. The path separator is a forward slash `/`. The target path can be relative to the parent directory of the symlink or it can be an absolute path starting with `/`. Support for absolute paths can be checked using the Capabilities API. The canonical form forbids the substrings `/./` and `//` in the target path. `..` components are allowed anywhere in the target path. */ target?: string; } /** * A `Platform` is a set of requirements, such as hardware, operating system, or compiler toolchain, for an Action's execution environment. A `Platform` is represented as a series of key-value pairs representing the properties that are required of the platform. 
*/ interface Schema$BuildBazelRemoteExecutionV2Platform { /** * The properties that make up this platform. In order to ensure that equivalent `Platform`s always hash to the same value, the properties MUST be lexicographically sorted by name, and then by value. Sorting of strings is done by code point, equivalently, by the UTF-8 bytes. */ properties?: Schema$BuildBazelRemoteExecutionV2PlatformProperty[]; } /** * A single property for the environment. The server is responsible for specifying the property `name`s that it accepts. If an unknown `name` is provided in the requirements for an Action, the server SHOULD reject the execution request. If permitted by the server, the same `name` may occur multiple times. The server is also responsible for specifying the interpretation of property `value`s. For instance, a property describing how much RAM must be available may be interpreted as allowing a worker with 16GB to fulfill a request for 8GB, while a property describing the OS environment on which the action must be performed may require an exact match with the worker's OS. The server MAY use the `value` of one or more properties to determine how it sets up the execution environment, such as by making specific system files available to the worker. */ interface Schema$BuildBazelRemoteExecutionV2PlatformProperty { /** * The property name. */ name?: string; /** * The property value. */ value?: string; } /** * An optional Metadata to attach to any RPC request to tell the server about an external context of the request. The server may use this for logging or other purposes. To use it, the client attaches the header to the call using the canonical proto serialization: * name: `build.bazel.remote.execution.v2.requestmetadata-bin` * contents: the base64 encoded binary `RequestMetadata` message. Note: the gRPC library serializes binary headers encoded in base 64 by default (https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests). 
Therefore, if the gRPC library is used to pass/retrieve this metadata, the user may ignore the base64 encoding and assume it is simply serialized as a binary message. */ interface Schema$BuildBazelRemoteExecutionV2RequestMetadata { /** * An identifier that ties multiple requests to the same action. For example, multiple requests to the CAS, Action Cache, and Execution API are used in order to compile foo.cc. */ actionId?: string; /** * An identifier to tie multiple tool invocations together. For example, runs of foo_test, bar_test and baz_test on a post-submit of a given patch. */ correlatedInvocationsId?: string; /** * The details for the tool invoking the requests. */ toolDetails?: Schema$BuildBazelRemoteExecutionV2ToolDetails; /** * An identifier that ties multiple actions together to a final result. For example, multiple actions are required to build and run foo_test. */ toolInvocationId?: string; } /** * A `SymlinkNode` represents a symbolic link. */ interface Schema$BuildBazelRemoteExecutionV2SymlinkNode { /** * The name of the symlink. */ name?: string; /** * The target path of the symlink. The path separator is a forward slash `/`. The target path can be relative to the parent directory of the symlink or it can be an absolute path starting with `/`. Support for absolute paths can be checked using the Capabilities API. The canonical form forbids the substrings `/./` and `//` in the target path. `..` components are allowed anywhere in the target path. */ target?: string; } /** * Details for the tool used to call the API. */ interface Schema$BuildBazelRemoteExecutionV2ToolDetails { /** * Name of the tool, e.g. bazel. */ toolName?: string; /** * Version of the tool used for the request, e.g. 5.0.3. */ toolVersion?: string; } /** * A `Tree` contains all the Directory protos in a single directory Merkle tree, compressed into one message. 
*/ interface Schema$BuildBazelRemoteExecutionV2Tree { /** * All the child directories: the directories referred to by the root and, recursively, all its children. In order to reconstruct the directory tree, the client must take the digests of each of the child directories and then build up a tree starting from the `root`. */ children?: Schema$BuildBazelRemoteExecutionV2Directory[]; /** * The root directory in the tree. */ root?: Schema$BuildBazelRemoteExecutionV2Directory; } /** * Media resource. */ interface Schema$GoogleBytestreamMedia { /** * Name of the media resource. */ resourceName?: string; } /** * CommandDuration contains the various duration metrics tracked when a bot performs a command. */ interface Schema$GoogleDevtoolsRemotebuildbotCommandDurations { /** * The time spent preparing the command to be run in a Docker container (includes pulling the Docker image, if necessary). */ dockerPrep?: string; /** * The time spent downloading the input files and constructing the working directory. */ download?: string; /** * The time spent executing the command (i.e., doing useful work). */ execution?: string; /** * The timestamp when preparation is done and bot starts downloading files. */ isoPrepDone?: string; /** * The time spent completing the command, in total. */ overall?: string; /** * The time spent uploading the stdout logs. */ stdout?: string; /** * The time spent uploading the output files. */ upload?: string; } /** * CommandEvents contains counters for the number of warnings and errors that occurred during the execution of a command. */ interface Schema$GoogleDevtoolsRemotebuildbotCommandEvents { /** * Indicates whether we are using a cached Docker image (true) or had to pull the Docker image (false) for this command. */ dockerCacheHit?: boolean; /** * The input cache miss ratio. */ inputCacheMiss?: number; /** * The number of errors reported. */ numErrors?: string; /** * The number of warnings reported. 
*/ numWarnings?: string; } /** * The internal status of the command result. */ interface Schema$GoogleDevtoolsRemotebuildbotCommandStatus { /** * The status code. */ code?: string; /** * The error message. */ message?: string; } /** * The request used for `CreateInstance`. */ interface Schema$GoogleDevtoolsRemotebuildexecutionAdminV1alphaCreateInstanceRequest { /** * Specifies the instance to create. The name in the instance, if specified in the instance, is ignored. */ instance?: Schema$GoogleDevtoolsRemotebuildexecutionAdminV1alphaInstance; /** * ID of the created instance. A valid `instance_id` must: be 6-50 characters long, contain only lowercase letters, digits, hyphens and underscores, start with a lowercase letter, and end with a lowercase letter or a digit. */ instanceId?: string; /** * Resource name of the project containing the instance. Format: `projects/[PROJECT_ID]`. */ parent?: string; } /** * The request used for `CreateWorkerPool`. */ interface Schema$GoogleDevtoolsRemotebuildexecutionAdminV1alphaCreateWorkerPoolRequest { /** * Resource name of the instance in which to create the new worker pool. Format: `projects/[PROJECT_ID]/instances/[INSTANCE_ID]`. */ parent?: string; /** * ID of the created worker pool. A valid pool ID must: be 6-50 characters long, contain only lowercase letters, digits, hyphens and underscores, start with a lowercase letter, and end with a lowercase letter or a digit. */ poolId?: string; /** * Specifies the worker pool to create. The name in the worker pool, if specified, is ignored. */ workerPool?: Schema$GoogleDevtoolsRemotebuildexecutionAdminV1alphaWorkerPool; } /** * The request used for `DeleteInstance`. */ interface Schema$GoogleDevtoolsRemotebuildexecutionAdminV1alphaDeleteInstanceRequest { /** * Name of the instance to delete. Format: `projects/[PROJECT_ID]/instances/[INSTANCE_ID]`. */ name?: string; } /** * The request used for DeleteWorkerPool. 
*/ interface Schema$GoogleDevtoolsRemotebuildexecutionAdminV1alphaDeleteWorkerPoolRequest { /** * Name of the worker pool to delete. Format: `projects/[PROJECT_ID]/instances/[INSTANCE_ID]/workerpools/[POOL_ID]`. */ name?: string; } /** * The request used for `GetInstance`. */ interface Schema$GoogleDevtoolsRemotebuildexecutionAdminV1alphaGetInstanceRequest { /** * Name of the instance to retrieve. Format: `projects/[PROJECT_ID]/instances/[INSTANCE_ID]`. */ name?: string; } /** * The request used for GetWorkerPool. */ interface Schema$GoogleDevtoolsRemotebuildexecutionAdminV1alphaGetWorkerPoolRequest { /** * Name of the worker pool to retrieve. Format: `projects/[PROJECT_ID]/instances/[INSTANCE_ID]/workerpools/[POOL_ID]`. */ name?: string; } /** * Instance conceptually encapsulates all Remote Build Execution resources for remote builds. An instance consists of storage and compute resources (for example, `ContentAddressableStorage`, `ActionCache`, `WorkerPools`) used for running remote builds. All Remote Build Execution API calls are scoped to an instance. */ interface Schema$GoogleDevtoolsRemotebuildexecutionAdminV1alphaInstance { /** * The location is a GCP region. Currently only `us-central1` is supported. */ location?: string; /** * Output only. Whether stack driver logging is enabled for the instance. */ loggingEnabled?: boolean; /** * Output only. Instance resource name formatted as: `projects/[PROJECT_ID]/instances/[INSTANCE_ID]`. Name should not be populated when creating an instance since it is provided in the `instance_id` field. */ name?: string; /** * Output only. State of the instance. */ state?: string; } interface Schema$GoogleDevtoolsRemotebuildexecutionAdminV1alphaListInstancesRequest { /** * Resource name of the project. Format: `projects/[PROJECT_ID]`. */ parent?: string; } interface Schema$GoogleDevtoolsRemotebuildexecutionAdminV1alphaListInstancesResponse { /** * The list of instances in a given project. 
*/ instances?: Schema$GoogleDevtoolsRemotebuildexecutionAdminV1alphaInstance[]; } interface Schema$GoogleDevtoolsRemotebuildexecutionAdminV1alphaListWorkerPoolsRequest { /** * Optional. A filter to constrain the pools returned. Filters have the form: <field> <operator> <value> [[AND|OR] <field> <operator> <value>]... <field> is the path for a field or map key in the Pool proto message. e.g. "configuration.disk_size_gb" or "configuration.labels.key". <operator> can be one of "<", "<=", ">=", ">", "=", "!=", ":". ":" is a HAS operation for strings and repeated primitive fields. <value> is the value to test, case-insensitive for strings. "*" stands for any value and can be used to test for key presence. Parenthesis determine AND/OR precedence. In space separated restrictions, AND is implicit, e.g. "a = b x = y" is equivalent to "a = b AND x = y". Example filter: configuration.labels.key1 = * AND (state = RUNNING OR state = UPDATING) */ filter?: string; /** * Resource name of the instance. Format: `projects/[PROJECT_ID]/instances/[INSTANCE_ID]`. */ parent?: string; } interface Schema$GoogleDevtoolsRemotebuildexecutionAdminV1alphaListWorkerPoolsResponse { /** * The list of worker pools in a given instance. */ workerPools?: Schema$GoogleDevtoolsRemotebuildexecutionAdminV1alphaWorkerPool[]; } /** * The request used for UpdateWorkerPool. */ interface Schema$GoogleDevtoolsRemotebuildexecutionAdminV1alphaUpdateWorkerPoolRequest { /** * The update mask applies to worker_pool. For the `FieldMask` definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask If an empty update_mask is provided, only the non-default valued field in the worker pool field will be updated. Note that in order to update a field to the default value (zero, false, empty string) an explicit update_mask must be provided. */ updateMask?: string; /** * Specifies the worker pool to update. 
*/ workerPool?: Schema$GoogleDevtoolsRemotebuildexecutionAdminV1alphaWorkerPool; } /** * Defines the configuration to be used for a creating workers in the worker pool. */ interface Schema$GoogleDevtoolsRemotebuildexecutionAdminV1alphaWorkerConfig { /** * Required. Size of the disk attached to the worker, in GB. See https://cloud.google.com/compute/docs/disks/ */ diskSizeGb?: string; /** * Required. Disk Type to use for the worker. See [Storage options](https://cloud.google.com/compute/docs/disks/#introduction). Currently only `pd-standard` is supported. */ diskType?: string; /** * Labels associated with the workers. Label keys and values can be no longer than 63 characters, can only contain lowercase letters, numeric characters, underscores and dashes. International letters are permitted. Keys must start with a letter but values are optional. There can not be more than 64 labels per resource. */ labels?: { [key: string]: string; }; /** * Required. Machine type of the worker, such as `n1-standard-2`. See https://cloud.google.com/compute/docs/machine-types for a list of supported machine types. Note that `f1-micro` and `g1-small` are not yet supported. */ machineType?: string; /** * Minimum CPU platform to use when creating the worker. See [CPU Platforms](https://cloud.google.com/compute/docs/cpu-platforms). */ minCpuPlatform?: string; /** * Determines whether the worker is reserved (equivalent to a Compute Engine on-demand VM and therefore won't be preempted). See [Preemptible VMs](https://cloud.google.com/preemptible-vms/) for more details. */ reserved?: boolean; } /** * A worker pool resource in the Remote Build Execution API. */ interface Schema$GoogleDevtoolsRemotebuildexecutionAdminV1alphaWorkerPool { /** * WorkerPool resource name formatted as: `projects/[PROJECT_ID]/instances/[INSTANCE_ID]/workerpools/[POOL_ID]`. name should not be populated when creating a worker pool since it is provided in the `poolId` field. */ name?: string; /** * Output only. 
State of the worker pool. */ state?: string; /** * Specifies the properties, such as machine type and disk size, used for creating workers in a worker pool. */ workerConfig?: Schema$GoogleDevtoolsRemotebuildexecutionAdminV1alphaWorkerConfig; /** * The desired number of workers in the worker pool. Must be a value between 0 and 1000. */ workerCount?: string; } /** * An ActionResult represents the result of an Action being run. */ interface Schema$GoogleDevtoolsRemoteexecutionV1testActionResult { /** * The exit code of the command. */ exitCode?: number; /** * The output directories of the action. For each output directory requested in the `output_directories` field of the Action, if the corresponding directory existed after the action completed, a single entry will be present in the output list, which will contain the digest of a Tree message containing the directory tree, and the path equal exactly to the corresponding Action output_directories member. As an example, suppose the Action had an output directory `a/b/dir` and the execution produced the following contents in `a/b/dir`: a file named `bar` and a directory named `foo` with an executable file named `baz`. Then, output_directory will contain (hashes shortened for readability): ```json // OutputDirectory proto: { path: "a/b/dir" tree_digest: { hash: "4a73bc9d03...", size: 55 } } // Tree proto with hash "4a73bc9d03..." and size 55: { root: { files: [ { name: "bar", digest: { hash: "4a73bc9d03...", size: 65534 } } ], directories: [ { name: "foo", digest: { hash: "4cf2eda940...", size: 43 } } ] } children : { // (Directory proto with hash "4cf2eda940..." and size 43) files: [ { name: "baz", digest: { hash: "b2c941073e...", size: 1294, }, is_executable: true } ] } } ``` */ outputDirectories?: Schema$GoogleDevtoolsRemoteexecutionV1testOutputDirectory[]; /** * The output files of the action. 
For each output file requested in the `output_files` field of the Action, if the corresponding file existed after the action completed, a single entry will be present in the output list. If the action does not produce the requested output, or produces a directory where a regular file is expected or vice versa, then that output will be omitted from the list. The server is free to arrange the output list as desired; clients MUST NOT assume that the output list is sorted. */ outputFiles?: Schema$GoogleDevtoolsRemoteexecutionV1testOutputFile[]; /** * The digest for a blob containing the standard error of the action, which can be retrieved from the ContentAddressableStorage. See `stderr_raw` for when this will be set. */ stderrDigest?: Schema$GoogleDevtoolsRemoteexecutionV1testDigest; /** * The standard error buffer of the action. The server will determine, based on the size of the buffer, whether to return it in raw form or to return a digest in `stderr_digest` that points to the buffer. If neither is set, then the buffer is empty. The client SHOULD NOT assume it will get