// googleapis — Google APIs Client Library for Node.js
// Generated TypeScript declarations for the Cloud Tool Results API (toolresults v1beta3).
/**
* Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { OAuth2Client, JWT, Compute, UserRefreshClient } from 'google-auth-library';
import { GoogleConfigurable, MethodOptions, GlobalOptions, BodyResponseCallback, APIRequestContext } from 'googleapis-common';
import { GaxiosPromise } from 'gaxios';
export declare namespace toolresults_v1beta3 {
interface Options extends GlobalOptions {
version: 'v1beta3';
}
interface StandardParameters {
/**
* Data format for the response.
*/
alt?: string;
/**
* Selector specifying which fields to include in a partial response.
*/
fields?: string;
/**
* API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
*/
key?: string;
/**
* OAuth 2.0 token for the current user.
*/
oauth_token?: string;
/**
* Returns response with indentations and line breaks.
*/
prettyPrint?: boolean;
/**
* An opaque string that represents a user for quota purposes. Must not exceed 40 characters.
*/
quotaUser?: string;
/**
* Deprecated. Please use quotaUser instead.
*/
userIp?: string;
}
/**
* Cloud Tool Results API
*
* Reads and publishes results from Firebase Test Lab.
*
* @example
* const {google} = require('googleapis');
* const toolresults = google.toolresults('v1beta3');
*
* @namespace toolresults
* @type {Function}
* @version v1beta3
* @variation v1beta3
* @param {object=} options Options for Toolresults
*/
class Toolresults {
context: APIRequestContext;
projects: Resource$Projects;
constructor(options: GlobalOptions, google?: GoogleConfigurable);
}
/**
* Android app information.
*/
interface Schema$AndroidAppInfo {
/**
* The name of the app. Optional
*/
name?: string;
/**
* The package name of the app. Required.
*/
packageName?: string;
/**
* The internal version code of the app. Optional.
*/
versionCode?: string;
/**
* The version name of the app. Optional.
*/
versionName?: string;
}
/**
* A test of an Android application that can control an Android component independently of its normal lifecycle. See for more information on types of Android tests.
*/
interface Schema$AndroidInstrumentationTest {
/**
* The java package for the test to be executed. Required
*/
testPackageId?: string;
/**
* The InstrumentationTestRunner class. Required
*/
testRunnerClass?: string;
/**
* Each target must be fully qualified with the package name or class name, in one of these formats: - "package package_name" - "class package_name.class_name" - "class package_name.class_name#method_name" If empty, all targets in the module will be run.
*/
testTargets?: string[];
/**
* The flag indicates whether Android Test Orchestrator will be used to run test or not.
*/
useOrchestrator?: boolean;
}
/**
* A test of an android application that explores the application on a virtual or physical Android device, finding culprits and crashes as it goes.
*/
interface Schema$AndroidRoboTest {
/**
* The initial activity that should be used to start the app. Optional
*/
appInitialActivity?: string;
/**
* The java package for the bootstrap. Optional
*/
bootstrapPackageId?: string;
/**
* The runner class for the bootstrap. Optional
*/
bootstrapRunnerClass?: string;
/**
* The max depth of the traversal stack Robo can explore. Optional
*/
maxDepth?: number;
/**
* The max number of steps/actions Robo can execute. Default is no limit (0). Optional
*/
maxSteps?: number;
}
/**
* An Android mobile test specification.
*/
interface Schema$AndroidTest {
/**
* Information about the application under test.
*/
androidAppInfo?: Schema$AndroidAppInfo;
/**
* An Android instrumentation test.
*/
androidInstrumentationTest?: Schema$AndroidInstrumentationTest;
/**
* An Android robo test.
*/
androidRoboTest?: Schema$AndroidRoboTest;
/**
* Max time a test is allowed to run before it is automatically cancelled.
*/
testTimeout?: Schema$Duration;
}
/**
* `Any` contains an arbitrary serialized protocol buffer message along with a URL that describes the type of the serialized message. Protobuf library provides support to pack/unpack Any values in the form of utility functions or additional generated methods of the Any type. Example 1: Pack and unpack a message in C++. Foo foo = ...; Any any; any.PackFrom(foo); ... if (any.UnpackTo(&foo)) { ... } Example 2: Pack and unpack a message in Java. Foo foo = ...; Any any = Any.pack(foo); ... if (any.is(Foo.class)) { foo = any.unpack(Foo.class); } Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() any.Pack(foo) ... if any.Is(Foo.DESCRIPTOR): any.Unpack(foo) ... Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} any, err := ptypes.MarshalAny(foo) ... foo := &pb.Foo{} if err := ptypes.UnmarshalAny(any, foo); err != nil { ... } The pack methods provided by protobuf library will by default use 'type.googleapis.com/full.type.name' as the type URL and the unpack methods only use the fully qualified type name after the last '/' in the type URL, for example "foo.bar.com/x/y.z" will yield type name "y.z". JSON ==== The JSON representation of an `Any` value uses the regular representation of the deserialized, embedded message, with an additional field `@type` which contains the type URL. Example: package google.profile; message Person { string first_name = 1; string last_name = 2; } { "@type": "type.googleapis.com/google.profile.Person", "firstName": , "lastName": } If the embedded message type is well-known and has a custom JSON representation, that representation will be embedded adding a field `value` which holds the custom JSON in addition to the `@type` field. Example (for message [google.protobuf.Duration][]): { "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" }
*/
interface Schema$Any {
/**
* A URL/resource name that uniquely identifies the type of the serialized protocol buffer message. This string must contain at least one "/" character. The last segment of the URL's path must represent the fully qualified name of the type (as in `path/google.protobuf.Duration`). The name should be in a canonical form (e.g., leading "." is not accepted). In practice, teams usually precompile into the binary all types that they expect it to use in the context of Any. However, for URLs which use the scheme `http`, `https`, or no scheme, one can optionally set up a type server that maps type URLs to message definitions as follows: * If no scheme is provided, `https` is assumed. * An HTTP GET on the URL must yield a [google.protobuf.Type][] value in binary format, or produce an error. * Applications are allowed to cache lookup results based on the URL, or have them precompiled into a binary to avoid any lookup. Therefore, binary compatibility needs to be preserved on changes to types. (Use versioned type names to manage breaking changes.) Note: this functionality is not currently available in the official protobuf release, and it is not used for type URLs beginning with type.googleapis.com. Schemes other than `http`, `https` (or the empty scheme) might be used with implementation specific semantics.
*/
typeUrl?: string;
/**
* Must be a valid serialized protocol buffer of the above specified type.
*/
value?: string;
}
interface Schema$AppStartTime {
/**
* Optional. The time from app start to reaching the developer-reported "fully drawn" time. This is only stored if the app includes a call to Activity.reportFullyDrawn(). See https://developer.android.com/topic/performance/launch-time.html#time-full
*/
fullyDrawnTime?: Schema$Duration;
/**
* The time from app start to the first displayed activity being drawn, as reported in Logcat. See https://developer.android.com/topic/performance/launch-time.html#time-initial
*/
initialDisplayTime?: Schema$Duration;
}
/**
* Encapsulates the metadata for basic sample series represented by a line chart
*/
interface Schema$BasicPerfSampleSeries {
perfMetricType?: string;
perfUnit?: string;
sampleSeriesLabel?: string;
}
/**
* The request must provide up to a maximum of 5000 samples to be created; a larger sample size will cause an INVALID_ARGUMENT error
*/
interface Schema$BatchCreatePerfSamplesRequest {
/**
* The set of PerfSamples to create should not include existing timestamps
*/
perfSamples?: Schema$PerfSample[];
}
interface Schema$BatchCreatePerfSamplesResponse {
perfSamples?: Schema$PerfSample[];
}
interface Schema$CPUInfo {
/**
* description of the device processor ie '1.8 GHz hexa core 64-bit ARMv8-A'
*/
cpuProcessor?: string;
/**
* the CPU clock speed in GHz
*/
cpuSpeedInGhz?: number;
/**
* the number of CPU cores
*/
numberOfCores?: number;
}
/**
* A Duration represents a signed, fixed-length span of time represented as a count of seconds and fractions of seconds at nanosecond resolution. It is independent of any calendar and concepts like "day" or "month". It is related to Timestamp in that the difference between two Timestamp values is a Duration and it can be added or subtracted from a Timestamp. Range is approximately +-10,000 years. # Examples Example 1: Compute Duration from two Timestamps in pseudo code. Timestamp start = ...; Timestamp end = ...; Duration duration = ...; duration.seconds = end.seconds - start.seconds; duration.nanos = end.nanos - start.nanos; if (duration.seconds 0) { duration.seconds += 1; duration.nanos -= 1000000000; } else if (durations.seconds > 0 && duration.nanos < 0) { duration.seconds -= 1; duration.nanos += 1000000000; } Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. Timestamp start = ...; Duration duration = ...; Timestamp end = ...; end.seconds = start.seconds + duration.seconds; end.nanos = start.nanos + duration.nanos; if (end.nanos = 1000000000) { end.seconds += 1; end.nanos -= 1000000000; } Example 3: Compute Duration from datetime.timedelta in Python. td = datetime.timedelta(days=3, minutes=10) duration = Duration() duration.FromTimedelta(td) # JSON Mapping In JSON format, the Duration type is encoded as a string rather than an object, where the string ends in the suffix "s" (indicating seconds) and is preceded by the number of seconds, with nanoseconds expressed as fractional seconds. For example, 3 seconds with 0 nanoseconds should be encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should be expressed in JSON format as "3.000000001s", and 3 seconds and 1 microsecond should be expressed in JSON format as "3.000001s".
*/
interface Schema$Duration {
/**
* Signed fractions of a second at nanosecond resolution of the span of time. Durations less than one second are represented with a 0 `seconds` field and a positive or negative `nanos` field. For durations of one second or more, a non-zero value for the `nanos` field must be of the same sign as the `seconds` field. Must be from -999,999,999 to +999,999,999 inclusive.
*/
nanos?: number;
/**
* Signed seconds of the span of time. Must be from -315,576,000,000 to +315,576,000,000 inclusive. Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
*/
seconds?: string;
}
/**
* An Execution represents a collection of Steps. For instance, it could represent: - a mobile test executed across a range of device configurations - a jenkins job with a build step followed by a test step The maximum size of an execution message is 1 MiB. An Execution can be updated until its state is set to COMPLETE at which point it becomes immutable.
*/
interface Schema$Execution {
/**
* The time when the Execution status transitioned to COMPLETE. This value will be set automatically when state transitions to COMPLETE. - In response: set if the execution state is COMPLETE. - In create/update request: never set
*/
completionTime?: Schema$Timestamp;
/**
* The time when the Execution was created. This value will be set automatically when CreateExecution is called. - In response: always set - In create/update request: never set
*/
creationTime?: Schema$Timestamp;
/**
* A unique identifier within a History for this Execution. Returns INVALID_ARGUMENT if this field is set or overwritten by the caller. - In response always set - In create/update request: never set
*/
executionId?: string;
/**
* Classify the result, for example into SUCCESS or FAILURE - In response: present if set by create/update request - In create/update request: optional
*/
outcome?: Schema$Outcome;
/**
* Lightweight information about execution request. - In response: present if set by create - In create: optional - In update: optional
*/
specification?: Schema$Specification;
/**
* The initial state is IN_PROGRESS. The only legal state transitions is from IN_PROGRESS to COMPLETE. A PRECONDITION_FAILED will be returned if an invalid transition is requested. The state can only be set to COMPLETE once. A FAILED_PRECONDITION will be returned if the state is set to COMPLETE multiple times. If the state is set to COMPLETE, all the in-progress steps within the execution will be set as COMPLETE. If the outcome of the step is not set, the outcome will be set to INCONCLUSIVE. - In response always set - In create/update request: optional
*/
state?: string;
/**
* TestExecution Matrix ID that the TestExecutionService uses. - In response: present if set by create - In create: optional - In update: never set
*/
testExecutionMatrixId?: string;
}
/**
* Details for an outcome with a FAILURE outcome summary.
*/
interface Schema$FailureDetail {
/**
* If the failure was severe because the system (app) under test crashed.
*/
crashed?: boolean;
/**
* If an app is not installed and thus no test can be run with the app. This might be caused by trying to run a test on an unsupported platform.
*/
notInstalled?: boolean;
/**
* If a native process (including any other than the app) crashed.
*/
otherNativeCrash?: boolean;
/**
* If the test overran some time limit, and that is why it failed.
*/
timedOut?: boolean;
/**
* If the robo was unable to crawl the app; perhaps because the app did not start.
*/
unableToCrawl?: boolean;
}
/**
* A reference to a file.
*/
interface Schema$FileReference {
/**
* The URI of a file stored in Google Cloud Storage. For example: http://storage.googleapis.com/mybucket/path/to/test.xml or in gsutil format: gs://mybucket/path/to/test.xml with version-specific info, gs://mybucket/path/to/test.xml#1360383693690000 An INVALID_ARGUMENT error will be returned if the URI format is not supported. - In response: always set - In create/update request: always set
*/
fileUri?: string;
}
/**
* Graphics statistics for the App. The information is collected from 'adb shell dumpsys graphicsstats'. For more info see: https://developer.android.com/training/testing/performance.html Statistics will only be present for API 23+.
*/
interface Schema$GraphicsStats {
/**
* Histogram of frame render times. There should be 154 buckets ranging from [5ms, 6ms) to [4950ms, infinity)
*/
buckets?: Schema$GraphicsStatsBucket[];
/**
* Total "high input latency" events.
*/
highInputLatencyCount?: string;
/**
* Total frames with slow render time. Should be <= total_frames.
*/
jankyFrames?: string;
/**
* Total "missed vsync" events.
*/
missedVsyncCount?: string;
/**
* 50th percentile frame render time in milliseconds.
*/
p50Millis?: string;
/**
* 90th percentile frame render time in milliseconds.
*/
p90Millis?: string;
/**
* 95th percentile frame render time in milliseconds.
*/
p95Millis?: string;
/**
* 99th percentile frame render time in milliseconds.
*/
p99Millis?: string;
/**
* Total "slow bitmap upload" events.
*/
slowBitmapUploadCount?: string;
/**
* Total "slow draw" events.
*/
slowDrawCount?: string;
/**
* Total "slow UI thread" events.
*/
slowUiThreadCount?: string;
/**
* Total frames rendered by package.
*/
totalFrames?: string;
}
interface Schema$GraphicsStatsBucket {
/**
* Number of frames in the bucket.
*/
frameCount?: string;
/**
* Lower bound of render time in milliseconds.
*/
renderMillis?: string;
}
/**
* A History represents a sorted list of Executions ordered by the start_timestamp_millis field (descending). It can be used to group all the Executions of a continuous build. Note that the ordering only operates on one-dimension. If a repository has multiple branches, it means that multiple histories will need to be used in order to order Executions per branch.
*/
interface Schema$History {
/**
* A short human-readable (plain text) name to display in the UI. Maximum of 100 characters. - In response: present if set during create. - In create request: optional
*/
displayName?: string;
/**
* A unique identifier within a project for this History. Returns INVALID_ARGUMENT if this field is set or overwritten by the caller. - In response always set - In create request: never set
*/
historyId?: string;
/**
* A name to uniquely identify a history within a project. Maximum of 200 characters. - In response always set - In create request: always set
*/
name?: string;
}
/**
* An image, with a link to the main image and a thumbnail.
*/
interface Schema$Image {
/**
* An error explaining why the thumbnail could not be rendered.
*/
error?: Schema$Status;
/**
* A reference to the full-size, original image. This is the same as the tool_outputs entry for the image under its Step. Always set.
*/
sourceImage?: Schema$ToolOutputReference;
/**
* The step to which the image is attached. Always set.
*/
stepId?: string;
/**
* The thumbnail.
*/
thumbnail?: Schema$Thumbnail;
}
/**
* Details for an outcome with an INCONCLUSIVE outcome summary.
*/
interface Schema$InconclusiveDetail {
/**
* If the end user aborted the test execution before a pass or fail could be determined. For example, the user pressed ctrl-c which sent a kill signal to the test runner while the test was running.
*/
abortedByUser?: boolean;
/**
* If results are being provided to the user in certain cases of infrastructure failures
*/
hasErrorLogs?: boolean;
/**
* If the test runner could not determine success or failure because the test depends on a component other than the system under test which failed. For example, a mobile test requires provisioning a device where the test executes, and that provisioning can fail.
*/
infrastructureFailure?: boolean;
}
/**
* Step Id and outcome of each individual step that was run as a group with other steps with the same configuration.
*/
interface Schema$IndividualOutcome {
/**
* Unique int given to each step. Ranges from 0(inclusive) to total number of steps(exclusive). The primary step is 0.
*/
multistepNumber?: number;
outcomeSummary?: string;
/**
* How long it took for this step to run.
*/
runDuration?: Schema$Duration;
stepId?: string;
}
interface Schema$ListExecutionsResponse {
/**
* Executions. Always set.
*/
executions?: Schema$Execution[];
/**
* A continuation token to resume the query at the next item. Will only be set if there are more Executions to fetch.
*/
nextPageToken?: string;
}
/**
* Response message for HistoryService.List
*/
interface Schema$ListHistoriesResponse {
/**
* Histories.
*/
histories?: Schema$History[];
/**
* A continuation token to resume the query at the next item. Will only be set if there are more histories to fetch. Tokens are valid for up to one hour from the time of the first list request. For instance, if you make a list request at 1PM and use the token from this first request 10 minutes later, the token from this second response will only be valid for 50 minutes.
*/
nextPageToken?: string;
}
interface Schema$ListPerfSampleSeriesResponse {
/**
* The resulting PerfSampleSeries sorted by id
*/
perfSampleSeries?: Schema$PerfSampleSeries[];
}
interface Schema$ListPerfSamplesResponse {
/**
* Optional, returned if result size exceeds the page size specified in the request (or the default page size, 500, if unspecified). It indicates the last sample timestamp to be used as page_token in subsequent request
*/
nextPageToken?: string;
perfSamples?: Schema$PerfSample[];
}
interface Schema$ListScreenshotClustersResponse {
/**
* The set of clusters associated with an execution Always set
*/
clusters?: Schema$ScreenshotCluster[];
}
/**
* Response message for StepService.List.
*/
interface Schema$ListStepsResponse {
/**
* A continuation token to resume the query at the next item. If set, indicates that there are more steps to read, by calling list again with this value in the page_token field.
*/
nextPageToken?: string;
/**
* Steps.
*/
steps?: Schema$Step[];
}
/**
* A response containing the thumbnails in a step.
*/
interface Schema$ListStepThumbnailsResponse {
/**
* A continuation token to resume the query at the next item. If set, indicates that there are more thumbnails to read, by calling list again with this value in the page_token field.
*/
nextPageToken?: string;
/**
* A list of image data. Images are returned in a deterministic order; they are ordered by these factors, in order of importance: * First, by their associated test case. Images without a test case are considered greater than images with one. * Second, by their creation time. Images without a creation time are greater than images with one. * Third, by the order in which they were added to the step (by calls to CreateStep or UpdateStep).
*/
thumbnails?: Schema$Image[];
}
/**
* Response message for StepService.ListTestCases.
*/
interface Schema$ListTestCasesResponse {
nextPageToken?: string;
/**
* List of test cases.
*/
testCases?: Schema$TestCase[];
}
interface Schema$MemoryInfo {
/**
* Maximum memory that can be allocated to the process in KiB
*/
memoryCapInKibibyte?: string;
/**
* Total memory available on the device in KiB
*/
memoryTotalInKibibyte?: string;
}
/**
* Details when multiple steps are run with the same configuration as a group.
*/
interface Schema$MultiStep {
/**
* Unique int given to each step. Ranges from 0(inclusive) to total number of steps(exclusive). The primary step is 0.
*/
multistepNumber?: number;
/**
* Present if it is a primary (original) step.
*/
primaryStep?: Schema$PrimaryStep;
/**
* Step Id of the primary (original) step, which might be this step.
*/
primaryStepId?: string;
}
/**
* Interprets a result so that humans and machines can act on it.
*/
interface Schema$Outcome {
/**
* More information about a FAILURE outcome. Returns INVALID_ARGUMENT if this field is set but the summary is not FAILURE. Optional
*/
failureDetail?: Schema$FailureDetail;
/**
* More information about an INCONCLUSIVE outcome. Returns INVALID_ARGUMENT if this field is set but the summary is not INCONCLUSIVE. Optional
*/
inconclusiveDetail?: Schema$InconclusiveDetail;
/**
* More information about a SKIPPED outcome. Returns INVALID_ARGUMENT if this field is set but the summary is not SKIPPED. Optional
*/
skippedDetail?: Schema$SkippedDetail;
/**
* More information about a SUCCESS outcome. Returns INVALID_ARGUMENT if this field is set but the summary is not SUCCESS. Optional
*/
successDetail?: Schema$SuccessDetail;
/**
* The simplest way to interpret a result. Required
*/
summary?: string;
}
/**
* Encapsulates performance environment info
*/
interface Schema$PerfEnvironment {
/**
* CPU related environment info
*/
cpuInfo?: Schema$CPUInfo;
/**
* Memory related environment info
*/
memoryInfo?: Schema$MemoryInfo;
}
/**
* A summary of perf metrics collected and performance environment info
*/
interface Schema$PerfMetricsSummary {
appStartTime?: Schema$AppStartTime;
/**
* A tool results execution ID.
*/
executionId?: string;
/**
* Graphics statistics for the entire run. Statistics are reset at the beginning of the run and collected at the end of the run.
*/
graphicsStats?: Schema$GraphicsStats;
/**
* A tool results history ID.
*/
historyId?: string;
/**
* Describes the environment in which the performance metrics were collected
*/
perfEnvironment?: Schema$PerfEnvironment;
/**
* Set of resource collected
*/
perfMetrics?: string[];
/**
* The cloud project
*/
projectId?: string;
/**
* A tool results step ID.
*/
stepId?: string;
}
/**
* Resource representing a single performance measure or data point
*/
interface Schema$PerfSample {
/**
* Timestamp of collection
*/
sampleTime?: Schema$Timestamp;
/**
* Value observed
*/
value?: number;
}
/**
* Resource representing a collection of performance samples (or data points)
*/
interface Schema$PerfSampleSeries {
/**
* Basic series represented by a line chart
*/
basicPerfSampleSeries?: Schema$BasicPerfSampleSeries;
/**
* A tool results execution ID.
*/
executionId?: string;
/**
* A tool results history ID.
*/
historyId?: string;
/**
* The cloud project
*/
projectId?: string;
/**
* A sample series id
*/
sampleSeriesId?: string;
/**
* A tool results step ID.
*/
stepId?: string;
}
/**
* Stores rollup test status of multiple steps that were run as a group and outcome of each individual step.
*/
interface Schema$PrimaryStep {
/**
* Step Id and outcome of each individual step.
*/
individualOutcome?: Schema$IndividualOutcome[];
/**
* Rollup test status of multiple steps that were run with the same configuration as a group.
*/
rollUp?: string;
}
/**
* Per-project settings for the Tool Results service.
*/
interface Schema$ProjectSettings {
/**
* The name of the Google Cloud Storage bucket to which results are written. By default, this is unset. In update request: optional In response: optional
*/
defaultBucket?: string;
/**
* The name of the project's settings. Always of the form: projects/{project-id}/settings In update request: never set In response: always set
*/
name?: string;
}
/**
* Request message for StepService.PublishXunitXmlFiles.
*/
interface Schema$PublishXunitXmlFilesRequest {
/**
* URI of the Xunit XML files to publish. The maximum size of the file this reference is pointing to is 50MB. Required.
*/
xunitXmlFiles?: Schema$FileReference[];
}
interface Schema$Screen {
/**
* File reference of the png file. Required.
*/
fileReference?: string;
/**
* Locale of the device that the screenshot was taken on. Required.
*/
locale?: string;
/**
* Model of the device that the screenshot was taken on. Required.
*/
model?: string;
/**
* OS version of the device that the screenshot was taken on. Required.
*/
version?: string;
}
interface Schema$ScreenshotCluster {
/**
* A string that describes the activity of every screen in the cluster.
*/
activity?: string;
/**
* A unique identifier for the cluster.
*/
clusterId?: string;
/**
* A singular screen that represents the cluster as a whole. This screen will act as the "cover" of the entire cluster. When users look at the clusters, only the key screen from each cluster will be shown. Which screen is the key screen is determined by the ClusteringAlgorithm
*/
keyScreen?: Schema$Screen;
/**
* Full list of screens.
*/
screens?: Schema$Screen[];
}
/**
* Details for an outcome with a SKIPPED outcome summary.
*/
interface Schema$SkippedDetail {
/**
* If the App doesn't support the specific API level.
*/
incompatibleAppVersion?: boolean;
/**
* If the App doesn't run on the specific architecture, for example, x86.
*/
incompatibleArchitecture?: boolean;
/**
* If the requested OS version doesn't run on the specific device model.
*/
incompatibleDevice?: boolean;
}
/**
* The details about how to run the execution.
*/
interface Schema$Specification {
/**
* An Android mobile test execution specification.
*/
androidTest?: Schema$AndroidTest;
}
/**
* A stacktrace.
*/
interface Schema$StackTrace {
/**
* The stack trace message. Required
*/
exception?: string;
}
/**
* The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).
*/
interface Schema$Status {
/**
* The status code, which should be an enum value of [google.rpc.Code][].
*/
code?: number;
/**
* A list of messages that carry the error details. There is a common set of message types for APIs to use.
*/
details?: Schema$Any[];
/**
* A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the [google.rpc.Status.details][] field, or localized by the client.
*/
message?: string;
}
/**
 * A Step represents a single operation performed as part of an Execution. A step can be used to represent the execution of a tool (for example a test runner execution or an execution of a compiler). Steps can overlap (for instance two steps might have the same start time if some operations are done in parallel). Here is an example: let's consider a continuous build that is executing a test runner for each iteration. The workflow would look like: - user creates an Execution with id 1 - user creates a TestExecutionStep with id 100 for Execution 1 - user updates TestExecutionStep with id 100 to add a raw xml log + the service parses the xml logs and returns a TestExecutionStep with updated TestResult(s). - user updates the status of TestExecutionStep with id 100 to COMPLETE A Step can be updated until its state is set to COMPLETE, at which point it becomes immutable.
 */
interface Schema$Step {
    /**
     * The time when the step status was set to complete. This value will be set automatically when state transitions to COMPLETE. - In response: set if the execution state is COMPLETE. - In create/update request: never set
     */
    completionTime?: Schema$Timestamp;
    /**
     * The time when the step was created. - In response: always set - In create/update request: never set
     */
    creationTime?: Schema$Timestamp;
    /**
     * A description of this tool. For example: mvn clean package -D skipTests=true - In response: present if set by create/update request - In create/update request: optional
     */
    description?: string;
    /**
     * How much the device resource is used to perform the test. This is the device usage used for billing purposes, which is different from the run_duration; for example, infrastructure failure won't be charged for device usage. PRECONDITION_FAILED will be returned if one attempts to set a device_usage on a step which already has this field set. - In response: present if previously set. - In create request: optional - In update request: optional
     */
    deviceUsageDuration?: Schema$Duration;
    /**
     * If the execution containing this step has any dimension_definition set, then this field allows the child to specify the values of the dimensions. The keys must exactly match the dimension_definition of the execution. For example, if the execution has `dimension_definition = ['attempt', 'device']` then a step must define values for those dimensions, eg. `dimension_value = ['attempt': '1', 'device': 'Nexus 6']` If a step does not participate in one dimension of the matrix, the value for that dimension should be empty string. For example, if one of the tests is executed by a runner which does not support retries, the step could have `dimension_value = ['attempt': '', 'device': 'Nexus 6']` If the step does not participate in any dimensions of the matrix, it may leave dimension_value unset. A PRECONDITION_FAILED will be returned if any of the keys do not exist in the dimension_definition of the execution. A PRECONDITION_FAILED will be returned if another step in this execution already has the same name and dimension_value, but differs on other data fields, for example, step field is different. A PRECONDITION_FAILED will be returned if dimension_value is set, and there is a dimension_definition in the execution which is not specified as one of the keys. - In response: present if set by create - In create request: optional - In update request: never set
     */
    dimensionValue?: Schema$StepDimensionValueEntry[];
    /**
     * Whether any of the outputs of this step are images whose thumbnails can be fetched with ListThumbnails. - In response: always set - In create/update request: never set
     */
    hasImages?: boolean;
    /**
     * Arbitrary user-supplied key/value pairs that are associated with the step. Users are responsible for managing the key namespace such that keys don't accidentally collide. An INVALID_ARGUMENT will be returned if the number of labels exceeds 100 or if the length of any of the keys or values exceeds 100 characters. - In response: always set - In create request: optional - In update request: optional; any new key/value pair will be added to the map, and any new value for an existing key will update that key's value
     */
    labels?: Schema$StepLabelsEntry[];
    /**
     * Details when multiple steps are run with the same configuration as a group. These details can be used to identify which group this step is part of. It also identifies the group's 'primary step', which indexes all the group members. - In response: present if previously set. - In create request: optional, set iff this step was performed more than once. - In update request: optional
     */
    multiStep?: Schema$MultiStep;
    /**
     * A short human-readable name to display in the UI. Maximum of 100 characters. For example: Clean build A PRECONDITION_FAILED will be returned upon creating a new step if it shares its name and dimension_value with an existing step. If two steps represent a similar action, but have different dimension values, they should share the same name. For instance, if the same set of tests is run on two different platforms, the two steps should have the same name. - In response: always set - In create request: always set - In update request: never set
     */
    name?: string;
    /**
     * Classification of the result, for example into SUCCESS or FAILURE - In response: present if set by create/update request - In create/update request: optional
     */
    outcome?: Schema$Outcome;
    /**
     * How long it took for this step to run. If unset, this is set to the difference between creation_time and completion_time when the step is set to the COMPLETE state. In some cases, it is appropriate to set this value separately: For instance, if a step is created, but the operation it represents is queued for a few minutes before it executes, it would be appropriate not to include the time spent queued in its run_duration. PRECONDITION_FAILED will be returned if one attempts to set a run_duration on a step which already has this field set. - In response: present if previously set; always present on COMPLETE step - In create request: optional - In update request: optional
     */
    runDuration?: Schema$Duration;
    /**
     * The initial state is IN_PROGRESS. The only legal state transitions are * IN_PROGRESS -> COMPLETE A PRECONDITION_FAILED will be returned if an invalid transition is requested. It is valid to create a Step with a state set to COMPLETE. The state can only be set to COMPLETE once. A PRECONDITION_FAILED will be returned if the state is set to COMPLETE multiple times. - In response: always set - In create/update request: optional
     */
    state?: string;
    /**
     * A unique identifier within an Execution for this Step. Returns INVALID_ARGUMENT if this field is set or overwritten by the caller. - In response: always set - In create/update request: never set
     */
    stepId?: string;
    /**
     * An execution of a test runner.
     */
    testExecutionStep?: Schema$TestExecutionStep;
    /**
     * An execution of a tool (used for steps we don't explicitly support).
     */
    toolExecutionStep?: Schema$ToolExecutionStep;
}
/**
 * A single key/value pair giving this step's value for one dimension declared in the execution's dimension_definition (see Schema$Step.dimensionValue).
 */
interface Schema$StepDimensionValueEntry {
    /**
     * The dimension name; must match a key in the execution's dimension_definition.
     */
    key?: string;
    /**
     * The step's value for this dimension. May be an empty string if the step does not participate in this dimension.
     */
    value?: string;
}
/**
 * A single user-supplied key/value label attached to a step (see Schema$Step.labels for limits and update semantics).
 */
interface Schema$StepLabelsEntry {
    /**
     * The label key. Maximum of 100 characters.
     */
    key?: string;
    /**
     * The label value. Maximum of 100 characters.
     */
    value?: string;
}
/**
 * Details for an outcome with a SUCCESS outcome summary.
 */
interface Schema$SuccessDetail {
    /**
     * If a native process other than the app crashed.
     */
    otherNativeCrash?: boolean;
}
/**
 * A single test case within a Step, with its timing, status, and any failure details.
 */
interface Schema$TestCase {
    /**
     * The end time of the test case. Optional.
     */
    endTime?: Schema$Timestamp;
    /**
     * Why the test case was skipped. Present only for skipped test cases.
     */
    skippedMessage?: string;
    /**
     * The stack trace details if the test case failed or encountered an error. The maximum size of the stack traces is 100KiB, beyond which the stack trace will be truncated. Zero if the test case passed.
     */
    stackTraces?: Schema$StackTrace[];
    /**
     * The start time of the test case. Optional.
     */
    startTime?: Schema$Timestamp;
    /**
     * The status of the test case. Required.
     */
    status?: string;
    /**
     * A unique identifier within a Step for this Test Case.
     */
    testCaseId?: string;
    /**
     * Test case reference, e.g. name, class name and test suite name. Required.
     */
    testCaseReference?: Schema$TestCaseReference;
    /**
     * References to opaque files of any format output by the tool execution.
     */
    toolOutputs?: Schema$ToolOutputReference[];
}
/**
 * A reference to a test case. Test case references are canonically ordered lexicographically by these three factors: * First, by test_suite_name. * Second, by class_name. * Third, by name.
 */
interface Schema$TestCaseReference {
    /**
     * The name of the class.
     */
    className?: string;
    /**
     * The name of the test case. Required.
     */
    name?: string;
    /**
     * The name of the test suite to which this test case belongs.
     */
    testSuiteName?: string;
}
/**
 * A step that represents running tests. It accepts ant-junit xml files which will be parsed into structured test results by the service. Xml file paths are updated in order to append more files; however, they can't be deleted. Users can also add test results manually by using the test_result field.
 */
interface Schema$TestExecutionStep {
    /**
     * Issues observed during the test execution. For example, if the mobile app under test crashed during the test, the error message and the stack trace content can be recorded here to assist debugging. - In response: present if set by create or update - In create/update request: optional
     */
    testIssues?: Schema$TestIssue[];
    /**
     * List of test suite overview contents. This could be parsed from xUnit XML log by server, or uploaded directly by user. These references should only be called when test suites are fully parsed or uploaded. The maximum allowed number of test suite overviews per step is 1000. - In response: always set - In create request: optional - In update request: never (use publishXunitXmlFiles custom method instead)
     */
    testSuiteOverviews?: Schema$TestSuiteOverview[];
    /**
     * The timing break down of the test execution. - In response: present if set by create or update - In create/update request: optional
     */
    testTiming?: Schema$TestTiming;
    /**
     * Represents the execution of the test runner. The exit code of this tool will be used to determine if the test passed. - In response: always set - In create/update request: optional
     */
    toolExecution?: Schema$ToolExecution;
}
/**
 * An issue detected during a test execution.
 */
interface Schema$TestIssue {
    /**
     * Category of issue. Required.
     */
    category?: string;
    /**
     * A brief human-readable message describing the issue. Required.
     */
    errorMessage?: string;
    /**
     * Severity of issue. Required.
     */
    severity?: string;
    /**
     * Deprecated in favor of stack trace fields inside specific warnings.
     */
    stackTrace?: Schema$StackTrace;
    /**
     * Type of issue. Required.
     */
    type?: string;
    /**
     * Warning message with additional details of the issue. Should always be a message from com.google.devtools.toolresults.v1.warnings
     */
    warning?: Schema$Any;
}
/**
 * A summary of a test suite result either parsed from XML or uploaded directly by a user. Note: the API related comments are for StepService only. This message is also being used in ExecutionService in a read only mode for the corresponding step.
 */
interface Schema$TestSuiteOverview {
    /**
     * Elapsed time of test suite.
     */
    elapsedTime?: Schema$Duration;
    /**
     * Number of test cases in error, typically set by the service by parsing the xml_source. - In create/response: always set - In update request: never
     */
    errorCount?: number;
    /**
     * Number of failed test cases, typically set by the service by parsing the xml_source. May also be set by the user. - In create/response: always set - In update request: never
     */
    failureCount?: number;
    /**
     * The name of the test suite. - In create/response: always set - In update request: never
     */
    name?: string;
    /**
     * Number of test cases not run, typically set by the service by parsing the xml_source. - In create/response: always set - In update request: never
     */
    skippedCount?: number;
    /**
     * Number of test cases, typically set by the service by parsing the xml_source. - In create/response: always set - In update request: never
     */
    totalCount?: number;
    /**
     * If this test suite was parsed from XML, this is the URI where the original XML file is stored. Note: Multiple test suites can share the same xml_source. Returns INVALID_ARGUMENT if the uri format is not supported. - In create/response: optional - In update request: never
     */
    xmlSource?: Schema$FileReference;
}
/**
 * Test timing broken down into known phases.
 */
interface Schema$TestTiming {
    /**
     * How long it took to run the test process. - In response: present if previously set. - In create/update request: optional
     */
    testProcessDuration?: Schema$Duration;
}
/**
 * A single thumbnail, with its size and format.
 */
interface Schema$Thumbnail {
    /**
     * The thumbnail's content type, i.e. "image/png". Always set.
     */
    contentType?: string;
    /**
     * The thumbnail file itself. That is, the bytes here are precisely the bytes that make up the thumbnail file; they can be served as an image as-is (with the appropriate content type). Always set.
     */
    data?: string;
    /**
     * The height of the thumbnail, in pixels. Always set.
     */
    heightPx?: number;
    /**
     * The width of the thumbnail, in pixels. Always set.
     */
    widthPx?: number;
}
/**
* A Timestamp represents a point in time i