// googleapis
// Version: (unspecified)
// Google APIs Client Library for Node.js
// 877 lines • 209 kB
// TypeScript
/**
* Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { OAuth2Client, JWT, Compute, UserRefreshClient } from 'google-auth-library';
import { GoogleConfigurable, MethodOptions, GlobalOptions, BodyResponseCallback, APIRequestContext } from 'googleapis-common';
import { GaxiosPromise } from 'gaxios';
export declare namespace monitoring_v3 {
/**
 * Options accepted by google.monitoring(): extends the library-wide GlobalOptions and pins this client to API version 'v3'.
 */
interface Options extends GlobalOptions {
version: 'v3';
}
/**
 * Query parameters accepted by every method of this API, in addition to any method-specific parameters.
 */
interface StandardParameters {
/**
 * V1 error format.
 */
'$.xgafv'?: string;
/**
 * OAuth access token.
 */
access_token?: string;
/**
 * Data format for response.
 */
alt?: string;
/**
 * JSONP
 */
callback?: string;
/**
 * Selector specifying which fields to include in a partial response.
 */
fields?: string;
/**
 * API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
 */
key?: string;
/**
 * OAuth 2.0 token for the current user.
 */
oauth_token?: string;
/**
 * Returns response with indentations and line breaks.
 */
prettyPrint?: boolean;
/**
 * Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
 */
quotaUser?: string;
/**
 * Legacy upload protocol for media (e.g. "media", "multipart").
 */
uploadType?: string;
/**
 * Upload protocol for media (e.g. "raw", "multipart").
 */
upload_protocol?: string;
}
/**
 * Stackdriver Monitoring API
 *
 * Manages your Stackdriver Monitoring data and configurations. Most projects must be associated with a Stackdriver account, with a few exceptions as noted on the individual method pages. The table entries below are presented in alphabetical order, not in order of common use. For explanations of the concepts found in the table entries, read the Stackdriver Monitoring documentation.
 *
 * @example
 * const {google} = require('googleapis');
 * const monitoring = google.monitoring('v3');
 *
 * @namespace monitoring
 * @type {Function}
 * @version v3
 * @variation v3
 * @param {object=} options Options for Monitoring
 */
class Monitoring {
// Request context (options plus optional GoogleConfigurable) shared by the resource collections below.
context: APIRequestContext;
// API resources, one per top-level REST collection of the Monitoring v3 surface.
projects: Resource$Projects;
uptimeCheckIps: Resource$Uptimecheckips;
constructor(options: GlobalOptions, google?: GoogleConfigurable);
}
/**
 * Describes how to combine multiple time series to provide different views of the data. Aggregation consists of an alignment step on individual time series (alignment_period and per_series_aligner) followed by an optional reduction step of the data across the aligned time series (cross_series_reducer and group_by_fields). For more details, see Aggregation.
 */
interface Schema$Aggregation {
/**
 * The alignment period for per-time series alignment. If present, alignmentPeriod must be at least 60 seconds. After per-time series alignment, each time series will contain data points only on the period boundaries. If perSeriesAligner is not specified or equals ALIGN_NONE, then this field is ignored. If perSeriesAligner is specified and does not equal ALIGN_NONE, then this field must be defined; otherwise an error is returned.
 */
alignmentPeriod?: string;
/**
 * The approach to be used to combine time series. Not all reducer functions may be applied to all time series, depending on the metric type and the value type of the original time series. Reduction may change the metric type or value type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If crossSeriesReducer is specified, then perSeriesAligner must be specified and not equal ALIGN_NONE and alignmentPeriod must be specified; otherwise, an error is returned.
 */
crossSeriesReducer?: string;
/**
 * The set of fields to preserve when crossSeriesReducer is specified. The groupByFields determine how the time series are partitioned into subsets prior to applying the aggregation function. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The crossSeriesReducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in groupByFields are aggregated away. If groupByFields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If crossSeriesReducer is not defined, this field is ignored.
 */
groupByFields?: string[];
/**
 * The approach to be used to align individual time series. Not all alignment functions may be applied to all time series, depending on the metric type and value type of the original time series. Alignment may change the metric type or the value type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If crossSeriesReducer is specified, then perSeriesAligner must be specified and not equal ALIGN_NONE and alignmentPeriod must be specified; otherwise, an error is returned.
 */
perSeriesAligner?: string;
}
/**
* A description of the conditions under which some aspect of your system is considered to be "unhealthy" and the ways to notify people or services about this state. For an overview of alert policies, see Introduction to Alerting.
*/
interface Schema$AlertPolicy {
/**
* How to combine the results of multiple conditions to determine if an incident should be opened.
*/
combiner?: string;
/**
* A list of conditions for the policy. The conditions are combined by AND or OR according to the combiner field. If the combined conditions evaluate to true, then an incident is created. A policy can have from one to six conditions.
*/
conditions?: Schema$Condition[];
/**
* A read-only record of the creation of the alerting policy. If provided in a call to create or update, this field will be ignored.
*/
creationRecord?: Schema$MutationRecord;
/**
* A short name or phrase used to identify the policy in dashboards, notifications, and incidents. To avoid confusion, don't use the same display name for multiple policies in the same project. The name is limited to 512 Unicode characters.
*/
displayName?: string;
/**
* Documentation that is included with notifications and incidents related to this policy. Best practice is for the documentation to include information to help responders understand, mitigate, escalate, and correct the underlying problems detected by the alerting policy. Notification channels that have limited capacity might not show this documentation.
*/
documentation?: Schema$Documentation;
/**
* Whether or not the policy is enabled. On write, the default interpretation if unset is that the policy is enabled. On read, clients should not make any assumption about the state if it has not been populated. The field should always be populated on List and Get operations, unless a field projection has been specified that strips it out.
*/
enabled?: boolean;
/**
* A read-only record of the most recent change to the alerting policy. If provided in a call to create or update, this field will be ignored.
*/
mutationRecord?: Schema$MutationRecord;
/**
* Required if the policy exists. The resource name for this policy. The syntax is: projects/[PROJECT_ID]/alertPolicies/[ALERT_POLICY_ID] [ALERT_POLICY_ID] is assigned by Stackdriver Monitoring when the policy is created. When calling the alertPolicies.create method, do not include the name field in the alerting policy passed as part of the request.
*/
name?: string;
/**
* Identifies the notification channels to which notifications should be sent when incidents are opened or closed or when new violations occur on an already opened incident. Each element of this array corresponds to the name field in each of the NotificationChannel objects that are returned from the ListNotificationChannels method. The syntax of the entries in this field is: projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]
*/
notificationChannels?: string[];
/**
* User-supplied key/value data to be used for organizing and identifying the AlertPolicy objects.The field can contain up to 64 entries. Each key and value is limited to 63 Unicode characters or 128 bytes, whichever is smaller. Labels and values can contain only lowercase letters, numerals, underscores, and dashes. Keys must begin with a letter.
*/
userLabels?: {
[key: string]: string;
};
}
/**
* A type of authentication to perform against the specified resource or URL that uses username and password. Currently, only Basic authentication is supported in Uptime Monitoring.
*/
interface Schema$BasicAuthentication {
/**
* The password to authenticate.
*/
password?: string;
/**
* The username to authenticate.
*/
username?: string;
}
/**
* BucketOptions describes the bucket boundaries used to create a histogram for the distribution. The buckets can be in a linear sequence, an exponential sequence, or each bucket can be specified explicitly. BucketOptions does not include the number of values in each bucket.A bucket has an inclusive lower bound and exclusive upper bound for the values that are counted for that bucket. The upper bound of a bucket must be strictly greater than the lower bound. The sequence of N buckets for a distribution consists of an underflow bucket (number 0), zero or more finite buckets (number 1 through N - 2) and an overflow bucket (number N - 1). The buckets are contiguous: the lower bound of bucket i (i > 0) is the same as the upper bound of bucket i - 1. The buckets span the whole range of finite values: lower bound of the underflow bucket is -infinity and the upper bound of the overflow bucket is +infinity. The finite buckets are so-called because both bounds are finite.
*/
interface Schema$BucketOptions {
/**
* The explicit buckets.
*/
explicitBuckets?: Schema$Explicit;
/**
* The exponential buckets.
*/
exponentialBuckets?: Schema$Exponential;
/**
* The linear bucket.
*/
linearBuckets?: Schema$Linear;
}
/**
* A collection of data points sent from a collectd-based plugin. See the collectd documentation for more information.
*/
interface Schema$CollectdPayload {
/**
* The end time of the interval.
*/
endTime?: string;
/**
* The measurement metadata. Example: "process_id" -> 12345
*/
metadata?: {
[key: string]: Schema$TypedValue;
};
/**
* The name of the plugin. Example: "disk".
*/
plugin?: string;
/**
* The instance name of the plugin Example: "hdcl".
*/
pluginInstance?: string;
/**
* The start time of the interval.
*/
startTime?: string;
/**
* The measurement type. Example: "memory".
*/
type?: string;
/**
* The measurement type instance. Example: "used".
*/
typeInstance?: string;
/**
* The measured values during this time interval. Each value must have a different dataSourceName.
*/
values?: Schema$CollectdValue[];
}
/**
* Describes the error status for payloads that were not written.
*/
interface Schema$CollectdPayloadError {
/**
* Records the error status for the payload. If this field is present, the partial errors for nested values won't be populated.
*/
error?: Schema$Status;
/**
* The zero-based index in CreateCollectdTimeSeriesRequest.collectd_payloads.
*/
index?: number;
/**
* Records the error status for values that were not written due to an error.Failed payloads for which nothing is written will not include partial value errors.
*/
valueErrors?: Schema$CollectdValueError[];
}
/**
* A single data point from a collectd-based plugin.
*/
interface Schema$CollectdValue {
/**
* The data source for the collectd value. For example there are two data sources for network measurements: "rx" and "tx".
*/
dataSourceName?: string;
/**
* The type of measurement.
*/
dataSourceType?: string;
/**
* The measurement value.
*/
value?: Schema$TypedValue;
}
/**
* Describes the error status for values that were not written.
*/
interface Schema$CollectdValueError {
/**
* Records the error status for the value.
*/
error?: Schema$Status;
/**
* The zero-based index in CollectdPayload.values within the parent CreateCollectdTimeSeriesRequest.collectd_payloads.
*/
index?: number;
}
/**
 * A condition is a true/false test that determines when an alerting policy should open an incident. If a condition evaluates to true, it signifies that something is wrong.
 */
interface Schema$Condition {
/**
 * A condition that checks that a time series continues to receive new data points; it fires when data is absent (see MetricAbsence).
 */
conditionAbsent?: Schema$MetricAbsence;
/**
 * A condition that compares a time series against a threshold.
 */
conditionThreshold?: Schema$MetricThreshold;
/**
 * A short name or phrase used to identify the condition in dashboards, notifications, and incidents. To avoid confusion, don't use the same display name for multiple conditions in the same policy.
 */
displayName?: string;
/**
 * Required if the condition exists. The unique resource name for this condition. Its syntax is: projects/[PROJECT_ID]/alertPolicies/[POLICY_ID]/conditions/[CONDITION_ID] [CONDITION_ID] is assigned by Stackdriver Monitoring when the condition is created as part of a new or updated alerting policy.When calling the alertPolicies.create method, do not include the name field in the conditions of the requested alerting policy. Stackdriver Monitoring creates the condition identifiers and includes them in the new policy.When calling the alertPolicies.update method to update a policy, including a condition name causes the existing condition to be updated. Conditions without names are added to the updated policy. Existing conditions are deleted if they are not updated.Best practice is to preserve [CONDITION_ID] if you make only small changes, such as those to condition thresholds, durations, or trigger values. Otherwise, treat the change as a new condition and let the existing condition be deleted.
 */
name?: string;
}
/**
 * Used to perform string matching. It allows substring and regular expressions, together with their negations.
 */
interface Schema$ContentMatcher {
/**
 * String or regex content to match (max 1024 bytes).
 */
content?: string;
}
/**
* The CreateCollectdTimeSeries request.
*/
interface Schema$CreateCollectdTimeSeriesRequest {
/**
* The collectd payloads representing the time series data. You must not include more than a single point for each time series, so no two payloads can have the same values for all of the fields plugin, plugin_instance, type, and type_instance.
*/
collectdPayloads?: Schema$CollectdPayload[];
/**
* The version of collectd that collected the data. Example: "5.3.0-192.el6".
*/
collectdVersion?: string;
/**
* The monitored resource associated with the time series.
*/
resource?: Schema$MonitoredResource;
}
/**
* The CreateCollectdTimeSeries response.
*/
interface Schema$CreateCollectdTimeSeriesResponse {
/**
* Records the error status for points that were not written due to an error.Failed requests for which nothing is written will return an error response instead.
*/
payloadErrors?: Schema$CollectdPayloadError[];
}
/**
* The CreateTimeSeries request.
*/
interface Schema$CreateTimeSeriesRequest {
/**
* The new data to be added to a list of time series. Adds at most one data point to each of several time series. The new data point must be more recent than any other point in its time series. Each TimeSeries value must fully specify a unique time series by supplying all label values for the metric and the monitored resource.The maximum number of TimeSeries objects per Create request is 200.
*/
timeSeries?: Schema$TimeSeries[];
}
/**
* Distribution contains summary statistics for a population of values. It optionally contains a histogram representing the distribution of those values across a set of buckets.The summary statistics are the count, mean, sum of the squared deviation from the mean, the minimum, and the maximum of the set of population of values. The histogram is based on a sequence of buckets and gives a count of values that fall into each bucket. The boundaries of the buckets are given either explicitly or by formulas for buckets of fixed or exponentially increasing widths.Although it is not forbidden, it is generally a bad idea to include non-finite values (infinities or NaNs) in the population of values, as this will render the mean and sum_of_squared_deviation fields meaningless.
*/
interface Schema$Distribution {
/**
* Required in the Stackdriver Monitoring API v3. The values for each bucket specified in bucket_options. The sum of the values in bucketCounts must equal the value in the count field of the Distribution object. The order of the bucket counts follows the numbering schemes described for the three bucket types. The underflow bucket has number 0; the finite buckets, if any, have numbers 1 through N-2; and the overflow bucket has number N-1. The size of bucket_counts must not be greater than N. If the size is less than N, then the remaining buckets are assigned values of zero.
*/
bucketCounts?: string[];
/**
* Required in the Stackdriver Monitoring API v3. Defines the histogram bucket boundaries.
*/
bucketOptions?: Schema$BucketOptions;
/**
* The number of values in the population. Must be non-negative. This value must equal the sum of the values in bucket_counts if a histogram is provided.
*/
count?: string;
/**
* Must be in increasing order of value field.
*/
exemplars?: Schema$Exemplar[];
/**
* The arithmetic mean of the values in the population. If count is zero then this field must be zero.
*/
mean?: number;
/**
* If specified, contains the range of the population values. The field must not be present if the count is zero. This field is presently ignored by the Stackdriver Monitoring API v3.
*/
range?: Schema$Range;
/**
* The sum of squared deviations from the mean of the values in the population. For values x_i this is: Sum[i=1..n]((x_i - mean)^2) Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd edition describes Welford's method for accumulating this sum in one pass.If count is zero then this field must be zero.
*/
sumOfSquaredDeviation?: number;
}
/**
 * A content string and a MIME type that describes the content string's format.
 */
interface Schema$Documentation {
/**
 * The text of the documentation, interpreted according to mime_type. The content may not exceed 8,192 Unicode characters and may not exceed 10,240 bytes when encoded in UTF-8 format, whichever is smaller.
 */
content?: string;
/**
 * The format of the content field. Presently, only the value "text/markdown" is supported. See Markdown (https://en.wikipedia.org/wiki/Markdown) for more information.
 */
mimeType?: string;
}
/**
* A set of (label, value) pairs which were dropped during aggregation, attached to google.api.Distribution.Exemplars in google.api.Distribution values during aggregation.These values are used in combination with the label values that remain on the aggregated Distribution timeseries to construct the full label set for the exemplar values. The resulting full label set may be used to identify the specific task/job/instance (for example) which may be contributing to a long-tail, while allowing the storage savings of only storing aggregated distribution values for a large group.Note that there are no guarantees on ordering of the labels from exemplar-to-exemplar and from distribution-to-distribution in the same stream, and there may be duplicates. It is up to clients to resolve any ambiguities.
*/
interface Schema$DroppedLabels {
/**
* Map from label to its value, for all labels dropped in any aggregation.
*/
label?: {
[key: string]: string;
};
}
/**
 * A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for Empty is the empty JSON object, {}.
 */
interface Schema$Empty {
}
/**
* Exemplars are example points that may be used to annotate aggregated distribution values. They are metadata that gives information about a particular value added to a Distribution bucket, such as a trace ID that was active when a value was added. They may contain further information, such as a example values and timestamps, origin, etc.
*/
interface Schema$Exemplar {
/**
* Contextual information about the example value. Examples are:Trace: type.googleapis.com/google.monitoring.v3.SpanContextLiteral string: type.googleapis.com/google.protobuf.StringValueLabels dropped during aggregation: type.googleapis.com/google.monitoring.v3.DroppedLabelsThere may be only a single attachment of any given message type in a single exemplar, and this is enforced by the system.
*/
attachments?: Array<{
[key: string]: any;
}>;
/**
* The observation (sampling) time of the above value.
*/
timestamp?: string;
/**
* Value of the exemplar point. This value determines to which bucket the exemplar belongs.
*/
value?: number;
}
/**
 * Specifies a set of buckets with arbitrary widths.There are size(bounds) + 1 (= N) buckets. Bucket i has the following boundaries:Upper bound (0 <= i < N-1): bounds[i]. Lower bound (1 <= i < N): bounds[i - 1].The bounds field must contain at least one element. If bounds has only one element, then there are no finite buckets, and that single element is the common boundary of the overflow and underflow buckets.
 */
interface Schema$Explicit {
/**
 * The values must be monotonically increasing.
 */
bounds?: number[];
}
/**
* Specifies an exponential sequence of buckets that have a width that is proportional to the value of the lower bound. Each bucket represents a constant relative uncertainty on a specific value in the bucket.There are num_finite_buckets + 2 (= N) buckets. Bucket i has the following boundaries:Upper bound (0 <= i < N-1): scale * (growth_factor ^ i). Lower bound (1 <= i < N): scale * (growth_factor ^ (i - 1)).
*/
interface Schema$Exponential {
/**
* Must be greater than 1.
*/
growthFactor?: number;
/**
* Must be greater than 0.
*/
numFiniteBuckets?: number;
/**
* Must be greater than 0.
*/
scale?: number;
}
/**
* A single field of a message type.
*/
interface Schema$Field {
/**
* The field cardinality.
*/
cardinality?: string;
/**
* The string value of the default value of this field. Proto2 syntax only.
*/
defaultValue?: string;
/**
* The field JSON name.
*/
jsonName?: string;
/**
* The field type.
*/
kind?: string;
/**
* The field name.
*/
name?: string;
/**
* The field number.
*/
number?: number;
/**
* The index of the field type in Type.oneofs, for message or enumeration types. The first type has index 1; zero means the type is not in the list.
*/
oneofIndex?: number;
/**
* The protocol buffer options.
*/
options?: Schema$Option[];
/**
* Whether to use alternative packed wire representation.
*/
packed?: boolean;
/**
* The field type URL, without the scheme, for message or enumeration types. Example: "type.googleapis.com/google.protobuf.Timestamp".
*/
typeUrl?: string;
}
/**
* The GetNotificationChannelVerificationCode request.
*/
interface Schema$GetNotificationChannelVerificationCodeRequest {
/**
* The desired expiration time. If specified, the API will guarantee that the returned code will not be valid after the specified timestamp; however, the API cannot guarantee that the returned code will be valid for at least as long as the requested time (the API puts an upper bound on the amount of time for which a code may be valid). If omitted, a default expiration will be used, which may be less than the max permissible expiration (so specifying an expiration may extend the code's lifetime over omitting an expiration, even though the API does impose an upper limit on the maximum expiration that is permitted).
*/
expireTime?: string;
}
/**
 * The GetNotificationChannelVerificationCode response.
 */
interface Schema$GetNotificationChannelVerificationCodeResponse {
/**
 * The verification code, which may be used to verify other channels that have an equivalent identity (i.e. other channels of the same type with the same fingerprint such as other email channels with the same email address or other sms channels with the same number).
 */
code?: string;
/**
 * The expiration time associated with the code that was returned. If an expiration was provided in the request, this is the minimum of the requested expiration in the request and the max permitted expiration.
 */
expireTime?: string;
}
/**
* The description of a dynamic collection of monitored resources. Each group has a filter that is matched against monitored resources and their associated metadata. If a group's filter matches an available monitored resource, then that resource is a member of that group. Groups can contain any number of monitored resources, and each monitored resource can be a member of any number of groups.Groups can be nested in parent-child hierarchies. The parentName field identifies an optional parent for each group. If a group has a parent, then the only monitored resources available to be matched by the group's filter are the resources contained in the parent group. In other words, a group contains the monitored resources that match its filter and the filters of all the group's ancestors. A group without a parent can contain any monitored resource.For example, consider an infrastructure running a set of instances with two user-defined tags: "environment" and "role". A parent group has a filter, environment="production". A child of that parent group has a filter, role="transcoder". The parent group contains all instances in the production environment, regardless of their roles. The child group contains instances that have the transcoder role and are in the production environment.The monitored resources contained in a group can change at any moment, depending on what resources exist and what filters are associated with the group and its ancestors.
*/
interface Schema$Group {
/**
* A user-assigned name for this group, used only for display purposes.
*/
displayName?: string;
/**
* The filter used to determine which monitored resources belong to this group.
*/
filter?: string;
/**
* If true, the members of this group are considered to be a cluster. The system can perform additional analysis on groups that are clusters.
*/
isCluster?: boolean;
/**
* Output only. The name of this group. The format is "projects/{project_id_or_number}/groups/{group_id}". When creating a group, this field is ignored and a new name is created consisting of the project specified in the call to CreateGroup and a unique {group_id} that is generated automatically.
*/
name?: string;
/**
* The name of the group's parent, if it has one. The format is "projects/{project_id_or_number}/groups/{group_id}". For groups with no parent, parentName is the empty string, "".
*/
parentName?: string;
}
/**
 * Information involved in an HTTP/HTTPS uptime check request.
 */
interface Schema$HttpCheck {
/**
 * The authentication information. Optional when creating an HTTP check; defaults to empty.
 */
authInfo?: Schema$BasicAuthentication;
/**
 * The list of headers to send as part of the uptime check request. If two headers have the same key and different values, they should be entered as a single header, with the value being a comma-separated list of all the desired values as described at https://www.w3.org/Protocols/rfc2616/rfc2616.txt (page 31). Entering two separate headers with the same key in a Create call will cause the first to be overwritten by the second. The maximum number of headers allowed is 100.
 */
headers?: {
[key: string]: string;
};
/**
 * Boolean specifying whether to encrypt the header information. Encryption should be specified for any headers related to authentication that you do not wish to be seen when retrieving the configuration. The server will be responsible for encrypting the headers. On Get/List calls, if mask_headers is set to True then the headers will be obscured with ******.
 */
maskHeaders?: boolean;
/**
 * The path to the page to run the check against. Will be combined with the host (specified within the MonitoredResource) and port to construct the full URL. Optional (defaults to "/"). If the provided path does not begin with "/", it will be prepended automatically.
 */
path?: string;
/**
 * The port to the page to run the check against. Will be combined with host (specified within the MonitoredResource) and path to construct the full URL. Optional (defaults to 80 without SSL, or 443 with SSL).
 */
port?: number;
/**
 * If true, use HTTPS instead of HTTP to run the check.
 */
useSsl?: boolean;
}
/**
* An internal checker allows uptime checks to run on private/internal GCP resources.
*/
interface Schema$InternalChecker {
/**
* The checker's human-readable name. The display name should be unique within a Stackdriver Workspace in order to make it easier to identify; however, uniqueness is not enforced.
*/
displayName?: string;
/**
* The GCP zone the uptime check should egress from. Only respected for internal uptime checks, where internal_network is specified.
*/
gcpZone?: string;
/**
* A unique resource name for this InternalChecker. The format is:projects/[PROJECT_ID]/internalCheckers/[INTERNAL_CHECKER_ID].PROJECT_ID is the stackdriver workspace project for the uptime check config associated with the internal checker.
*/
name?: string;
/**
* The GCP VPC network (https://cloud.google.com/vpc/docs/vpc) where the internal resource lives (ex: "default").
*/
network?: string;
/**
* The GCP project_id where the internal checker lives. Not necessary the same as the workspace project.
*/
peerProjectId?: string;
/**
* The current operational state of the internal checker.
*/
state?: string;
}
/**
* A description of a label.
*/
interface Schema$LabelDescriptor {
/**
* A human-readable description for the label.
*/
description?: string;
/**
* The label key.
*/
key?: string;
/**
* The type of data that can be assigned to the label.
*/
valueType?: string;
}
/**
* Specifies a linear sequence of buckets that all have the same width (except overflow and underflow). Each bucket represents a constant absolute uncertainty on the specific value in the bucket.There are num_finite_buckets + 2 (= N) buckets. Bucket i has the following boundaries:Upper bound (0 <= i < N-1): offset + (width * i). Lower bound (1 <= i < N): offset + (width * (i - 1)).
*/
interface Schema$Linear {
/**
* Must be greater than 0.
*/
numFiniteBuckets?: number;
/**
* Lower bound of the first bucket.
*/
offset?: number;
/**
* Must be greater than 0.
*/
width?: number;
}
/**
* The protocol for the ListAlertPolicies response.
*/
interface Schema$ListAlertPoliciesResponse {
/**
* The returned alert policies.
*/
alertPolicies?: Schema$AlertPolicy[];
/**
* If there might be more results than were returned, then this field is set to a non-empty value. To see the additional results, use that value as pageToken in the next call to this method.
*/
nextPageToken?: string;
}
/**
* The ListGroupMembers response.
*/
interface Schema$ListGroupMembersResponse {
/**
* A set of monitored resources in the group.
*/
members?: Schema$MonitoredResource[];
/**
* If there are more results than have been returned, then this field is set to a non-empty value. To see the additional results, use that value as pageToken in the next call to this method.
*/
nextPageToken?: string;
/**
* The total number of elements matching this request.
*/
totalSize?: number;
}
/**
 * The ListGroups response.
 */
interface Schema$ListGroupsResponse {
/**
 * The groups that match the specified filters. (Note: the field is named "group", singular, even though it holds an array.)
 */
group?: Schema$Group[];
/**
 * If there are more results than have been returned, then this field is set to a non-empty value. To see the additional results, use that value as pageToken in the next call to this method.
 */
nextPageToken?: string;
}
/**
 * The ListMetricDescriptors response: a page of metric descriptors plus an optional continuation token.
 */
interface Schema$ListMetricDescriptorsResponse {
/**
 * The metric descriptors that are available to the project and that match the value of filter, if present.
 */
metricDescriptors?: Schema$MetricDescriptor[];
/**
 * If there are more results than have been returned, then this field is set to a non-empty value. To see the additional results, use that value as pageToken in the next call to this method.
 */
nextPageToken?: string;
}
/**
 * The ListMonitoredResourceDescriptors response: a page of monitored resource descriptors plus an optional continuation token.
 */
interface Schema$ListMonitoredResourceDescriptorsResponse {
/**
 * If there are more results than have been returned, then this field is set to a non-empty value. To see the additional results, use that value as pageToken in the next call to this method.
 */
nextPageToken?: string;
/**
 * The monitored resource descriptors that are available to this project and that match filter, if present.
 */
resourceDescriptors?: Schema$MonitoredResourceDescriptor[];
}
/**
 * The ListNotificationChannelDescriptors response.
 */
interface Schema$ListNotificationChannelDescriptorsResponse {
/**
 * The notification channel descriptors supported for the specified project, optionally filtered.
 */
channelDescriptors?: Schema$NotificationChannelDescriptor[];
/**
 * If not empty, indicates that there may be more results that match the request. Use the value in the page_token field in a subsequent request to fetch the next set of results. If empty, all results have been returned.
 */
nextPageToken?: string;
}
/**
 * The ListNotificationChannels response: a page of notification channels plus an optional continuation token.
 */
interface Schema$ListNotificationChannelsResponse {
/**
 * If not empty, indicates that there may be more results that match the request. Use the value in the page_token field in a subsequent request to fetch the next set of results. If empty, all results have been returned.
 */
nextPageToken?: string;
/**
 * The notification channels defined for the specified project.
 */
notificationChannels?: Schema$NotificationChannel[];
}
/**
 * The ListTimeSeries response: a page of time series, any partial-query errors, and an optional continuation token.
 */
interface Schema$ListTimeSeriesResponse {
/**
 * Query execution errors that may have caused the time series data returned to be incomplete.
 */
executionErrors?: Schema$Status[];
/**
 * If there are more results than have been returned, then this field is set to a non-empty value. To see the additional results, use that value as pageToken in the next call to this method.
 */
nextPageToken?: string;
/**
 * One or more time series that match the filter included in the request.
 */
timeSeries?: Schema$TimeSeries[];
}
/**
 * The protocol for the ListUptimeCheckConfigs response: a page of uptime check configurations plus an optional continuation token.
 */
interface Schema$ListUptimeCheckConfigsResponse {
/**
 * This field represents the pagination token to retrieve the next page of results. If the value is empty, it means no further results for the request. To retrieve the next page of results, the value of the next_page_token is passed to the subsequent List method call (in the request message's page_token field).
 */
nextPageToken?: string;
/**
 * The total number of uptime check configurations for the project, irrespective of any pagination.
 */
totalSize?: number;
/**
 * The returned uptime check configurations.
 */
uptimeCheckConfigs?: Schema$UptimeCheckConfig[];
}
/**
 * The protocol for the ListUptimeCheckIps response: the IP addresses that uptime checkers run from.
 */
interface Schema$ListUptimeCheckIpsResponse {
/**
 * This field represents the pagination token to retrieve the next page of results. If the value is empty, it means no further results for the request. To retrieve the next page of results, the value of the next_page_token is passed to the subsequent List method call (in the request message's page_token field). NOTE: this field is not yet implemented
 */
nextPageToken?: string;
/**
 * The returned list of IP addresses (including region and location) that the checkers run from.
 */
uptimeCheckIps?: Schema$UptimeCheckIp[];
}
/**
 * A specific metric, identified by specifying values for all of the labels of a MetricDescriptor (see Schema$MetricDescriptor for the schema of a metric type).
 */
interface Schema$Metric {
/**
 * The set of label values that uniquely identify this metric. All labels listed in the MetricDescriptor must be assigned values.
 */
labels?: {
[key: string]: string;
};
/**
 * An existing metric type, see google.api.MetricDescriptor. For example, custom.googleapis.com/invoice/paid/amount.
 */
type?: string;
}
/**
 * A condition type that checks that monitored resources are reporting data. The configuration defines a metric and a set of monitored resources. The predicate is considered in violation when a time series for the specified metric of a monitored resource does not include any data in the specified duration.
 */
interface Schema$MetricAbsence {
/**
 * Specifies the alignment of data points in individual time series as well as how to combine the retrieved time series together (such as when aggregating multiple streams on each resource to a single stream for each resource or when aggregating streams across all members of a group of resources). Multiple aggregations are applied in the order specified.This field is similar to the one in the MetricService.ListTimeSeries request. It is advisable to use the ListTimeSeries method when debugging this field.
 */
aggregations?: Schema$Aggregation[];
/**
 * The amount of time that a time series must fail to report new data to be considered failing. Currently, only values that are a multiple of a minute--e.g. 60, 120, or 300 seconds--are supported. If an invalid value is given, an error will be returned. The Duration.nanos field is ignored.
 */
duration?: string;
/**
 * A filter that identifies which time series should be checked for absence of data.The filter is similar to the one that is specified in the MetricService.ListTimeSeries request (that call is useful to verify the time series that will be retrieved / processed) and must specify the metric type and optionally may contain restrictions on resource type, resource labels, and metric labels. This field may not exceed 2048 Unicode characters in length.
 */
filter?: string;
/**
 * The number/percent of time series for which the absence condition must hold in order for the condition to trigger. If unspecified, then the condition will trigger if the absence holds for any of the time series that have been identified by filter and aggregations.
 */
trigger?: Schema$Trigger;
}
/**
 * Defines a metric type and its schema. Once a metric descriptor is created, deleting or altering it stops data collection and makes the metric type's existing data unusable.
 */
interface Schema$MetricDescriptor {
/**
 * A detailed description of the metric, which can be used in documentation.
 */
description?: string;
/**
 * A concise name for the metric, which can be displayed in user interfaces. Use sentence case without an ending period, for example "Request count". This field is optional but it is recommended to be set for any metrics associated with user-visible concepts, such as Quota.
 */
displayName?: string;
/**
 * The set of labels that can be used to describe a specific instance of this metric type. For example, the appengine.googleapis.com/http/server/response_latencies metric type has a label for the HTTP response code, response_code, so you can look at latencies for successful responses or just for responses that failed.
 */
labels?: Schema$LabelDescriptor[];
/**
 * Optional. The launch stage of the metric definition.
 */
launchStage?: string;
/**
 * Optional. Metadata which can be used to guide usage of the metric.
 */
metadata?: Schema$MetricDescriptorMetadata;
/**
 * Whether the metric records instantaneous values, changes to a value, etc. Some combinations of metric_kind and value_type might not be supported.
 */
metricKind?: string;
/**
 * The resource name of the metric descriptor.
 */
name?: string;
/**
 * The metric type, including its DNS name prefix. The type is not URL-encoded. All user-defined metric types have the DNS name custom.googleapis.com or external.googleapis.com. Metric types should use a natural hierarchical grouping. For example: "custom.googleapis.com/invoice/paid/amount" "external.googleapis.com/prometheus/up" "appengine.googleapis.com/http/server/response_latencies" This is the value referenced by Schema$Metric.type.
 */
type?: string;
/**
 * The unit in which the metric value is reported. It is only applicable if the value_type is INT64, DOUBLE, or DISTRIBUTION. The supported units are a subset of The Unified Code for Units of Measure (http://unitsofmeasure.org/ucum.html) standard:Basic units (UNIT) bit bit By byte s second min minute h hour d dayPrefixes (PREFIX) k kilo (10**3) M mega (10**6) G giga (10**9) T tera (10**12) P peta (10**15) E exa (10**18) Z zetta (10**21) Y yotta (10**24) m milli (10**-3) u micro (10**-6) n nano (10**-9) p pico (10**-12) f femto (10**-15) a atto (10**-18) z zepto (10**-21) y yocto (10**-24) Ki kibi (2**10) Mi mebi (2**20) Gi gibi (2**30) Ti tebi (2**40)GrammarThe grammar also includes these connectors: / division (as an infix operator, e.g. 1/s). . multiplication (as an infix operator, e.g. GBy.d)The grammar for a unit is as follows: Expression = Component { "." Component } { "/" Component } ; Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation ] | Annotation | "1" ; Annotation = "{" NAME "}" ; Notes: Annotation is just a comment if it follows a UNIT and is equivalent to 1 if it is used alone. For examples, {requests}/s == 1/s, By{transmitted}/s == By/s. NAME is a sequence of non-blank printable ASCII characters not containing '{' or '}'. 1 represents dimensionless value 1, such as in 1/s. % represents dimensionless value 1/100, and annotates values giving a percentage.
 */
unit?: string;
/**
 * Whether the measurement is an integer, a floating-point number, etc. Some combinations of metric_kind and value_type might not be supported.
 */
valueType?: string;
}
/**
 * Additional annotations that can be used to guide the usage of a metric.
 */
interface Schema$MetricDescriptorMetadata {
/**
 * The delay of data points caused by ingestion. Data points older than this age are guaranteed to be ingested and available to be read, excluding data loss due to errors. (NOTE(review): presumably a Duration-formatted string such as "60s" -- confirm against the API reference.)
 */
ingestDelay?: string;
/**
 * Deprecated. Please use the MetricDescriptor.launch_stage instead. The launch stage of the metric definition.
 */
launchStage?: string;
/**
 * The sampling period of metric data points. For metrics which are written periodically, consecutive data points are stored at this time interval, excluding data loss due to errors. Metrics with a higher granularity have a smaller sampling period. (NOTE(review): presumably a Duration-formatted string such as "60s" -- confirm against the API reference.)
 */
samplePeriod?: string;
}
/**
* A condition type that compares a collection of time series against a threshold.
*/
interface Schema$MetricThreshold {
/**
* Specifies the alignment of data points in individual time series as well as how to combine the retrieved time series together (such as when aggregating multiple streams on each resource to a single stream for each resource or when aggregating streams across all members of a group of resrouces). Multiple aggregations are applied in the order specified.This field is similar to the one in the MetricS