// Source: UNPKG mirror of the @googleapis/monitoring package
// (auto-generated Cloud Monitoring API client; 905 lines / 896 loc, 544 kB).
// Copyright 2020 Google LLC // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /* eslint-disable @typescript-eslint/no-explicit-any */ /* eslint-disable @typescript-eslint/no-unused-vars */ /* eslint-disable @typescript-eslint/no-empty-interface */ /* eslint-disable @typescript-eslint/no-namespace */ /* eslint-disable no-irregular-whitespace */ import { OAuth2Client, JWT, Compute, UserRefreshClient, BaseExternalAccountClient, GaxiosResponseWithHTTP2, GoogleConfigurable, createAPIRequest, MethodOptions, StreamMethodOptions, GlobalOptions, GoogleAuth, BodyResponseCallback, APIRequestContext, } from 'googleapis-common'; import {Readable} from 'stream'; export namespace monitoring_v3 { export interface Options extends GlobalOptions { version: 'v3'; } interface StandardParameters { /** * Auth client or API Key for the request */ auth?: | string | OAuth2Client | JWT | Compute | UserRefreshClient | BaseExternalAccountClient | GoogleAuth; /** * V1 error format. */ '$.xgafv'?: string; /** * OAuth access token. */ access_token?: string; /** * Data format for response. */ alt?: string; /** * JSONP */ callback?: string; /** * Selector specifying which fields to include in a partial response. */ fields?: string; /** * API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */ key?: string; /** * OAuth 2.0 token for the current user. 
*/ oauth_token?: string; /** * Returns response with indentations and line breaks. */ prettyPrint?: boolean; /** * Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */ quotaUser?: string; /** * Legacy upload protocol for media (e.g. "media", "multipart"). */ uploadType?: string; /** * Upload protocol for media (e.g. "raw", "multipart"). */ upload_protocol?: string; } /** * Cloud Monitoring API * * Manages your Cloud Monitoring data and configurations. * * @example * ```js * const {google} = require('googleapis'); * const monitoring = google.monitoring('v3'); * ``` */ export class Monitoring { context: APIRequestContext; folders: Resource$Folders; organizations: Resource$Organizations; projects: Resource$Projects; services: Resource$Services; uptimeCheckIps: Resource$Uptimecheckips; constructor(options: GlobalOptions, google?: GoogleConfigurable) { this.context = { _options: options || {}, google, }; this.folders = new Resource$Folders(this.context); this.organizations = new Resource$Organizations(this.context); this.projects = new Resource$Projects(this.context); this.services = new Resource$Services(this.context); this.uptimeCheckIps = new Resource$Uptimecheckips(this.context); } } /** * Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is aligned to the same time interval boundaries, then the set of time series is optionally reduced in number.Alignment consists of applying the per_series_aligner operation to each time series after its data has been divided into regular alignment_period time intervals. 
This process takes all of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period.Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a cross_series_reducer to all the time series, optionally sorting the time series into subsets with group_by_fields, and applying the reducer to each subset.The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see Filtering and aggregation (https://cloud.google.com/monitoring/api/v3/aggregation). */ export interface Schema$Aggregation { /** * The alignment_period specifies a time interval, in seconds, that is used to divide the data in all the time series into consistent blocks of time. This will be done before the per-series aligner can be applied to the data.The value must be at least 60 seconds. If a per-series aligner other than ALIGN_NONE is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ALIGN_NONE is specified, then this field is ignored.The maximum value of the alignment_period is 104 weeks (2 years) for charts, and 90,000 seconds (25 hours) for alerting policies. 
*/ alignmentPeriod?: string | null; /** * The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series.Not all reducer operations can be applied to all time series. The valid choices depend on the metric_kind and the value_type of the original time series. Reduction can yield a time series with a different metric_kind or value_type than the input time series.Time series data must first be aligned (see per_series_aligner) in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified, and must not be ALIGN_NONE. An alignment_period must also be specified; otherwise, an error is returned. */ crossSeriesReducer?: string | null; /** * The set of fields to preserve when cross_series_reducer is specified. The group_by_fields determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The cross_series_reducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in group_by_fields are aggregated away. If group_by_fields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If cross_series_reducer is not defined, this field is ignored. */ groupByFields?: string[] | null; /** * An Aligner describes how to bring the data points in a single time series into temporal alignment. 
Except for ALIGN_NONE, all alignments cause all the data points in an alignment_period to be mathematically grouped together, resulting in a single data point for each alignment_period with end timestamp at the end of the period.Not all alignment operations may be applied to all time series. The valid choices depend on the metric_kind and value_type of the original time series. Alignment can change the metric_kind or the value_type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If cross_series_reducer is specified, then per_series_aligner must be specified and not equal to ALIGN_NONE and alignment_period must be specified; otherwise, an error is returned. */ perSeriesAligner?: string | null; } /** * A description of the conditions under which some aspect of your system is considered to be "unhealthy" and the ways to notify people or services about this state. For an overview of alerting policies, see Introduction to Alerting (https://cloud.google.com/monitoring/alerts/). */ export interface Schema$AlertPolicy { /** * Control over how this alerting policy's notification channels are notified. */ alertStrategy?: Schema$AlertStrategy; /** * How to combine the results of multiple conditions to determine if an incident should be opened. If condition_time_series_query_language is present, this must be COMBINE_UNSPECIFIED. */ combiner?: string | null; /** * A list of conditions for the policy. The conditions are combined by AND or OR according to the combiner field. If the combined conditions evaluate to true, then an incident is created. A policy can have from one to six conditions. If condition_time_series_query_language is present, it must be the only condition. If condition_monitoring_query_language is present, it must be the only condition. */ conditions?: Schema$Condition[]; /** * A read-only record of the creation of the alerting policy. If provided in a call to create or update, this field will be ignored. 
*/ creationRecord?: Schema$MutationRecord; /** * A short name or phrase used to identify the policy in dashboards, notifications, and incidents. To avoid confusion, don't use the same display name for multiple policies in the same project. The name is limited to 512 Unicode characters.The convention for the display_name of a PrometheusQueryLanguageCondition is "{rule group name\}/{alert name\}", where the {rule group name\} and {alert name\} should be taken from the corresponding Prometheus configuration file. This convention is not enforced. In any case the display_name is not a unique key of the AlertPolicy. */ displayName?: string | null; /** * Documentation that is included with notifications and incidents related to this policy. Best practice is for the documentation to include information to help responders understand, mitigate, escalate, and correct the underlying problems detected by the alerting policy. Notification channels that have limited capacity might not show this documentation. */ documentation?: Schema$Documentation; /** * Whether or not the policy is enabled. On write, the default interpretation if unset is that the policy is enabled. On read, clients should not make any assumption about the state if it has not been populated. The field should always be populated on List and Get operations, unless a field projection has been specified that strips it out. */ enabled?: boolean | null; /** * A read-only record of the most recent change to the alerting policy. If provided in a call to create or update, this field will be ignored. */ mutationRecord?: Schema$MutationRecord; /** * Identifier. Required if the policy exists. The resource name for this policy. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] [ALERT_POLICY_ID] is assigned by Cloud Monitoring when the policy is created. When calling the alertPolicies.create method, do not include the name field in the alerting policy passed as part of the request. 
*/ name?: string | null; /** * Identifies the notification channels to which notifications should be sent when incidents are opened or closed or when new violations occur on an already opened incident. Each element of this array corresponds to the name field in each of the NotificationChannel objects that are returned from the ListNotificationChannels method. The format of the entries in this field is: projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID] */ notificationChannels?: string[] | null; /** * Optional. The severity of an alerting policy indicates how important incidents generated by that policy are. The severity level will be displayed on the Incident detail page and in notifications. */ severity?: string | null; /** * User-supplied key/value data to be used for organizing and identifying the AlertPolicy objects.The field can contain up to 64 entries. Each key and value is limited to 63 Unicode characters or 128 bytes, whichever is smaller. Labels and values can contain only lowercase letters, numerals, underscores, and dashes. Keys must begin with a letter.Note that Prometheus {alert name\} is a valid Prometheus label names (https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels), whereas Prometheus {rule group\} is an unrestricted UTF-8 string. This means that they cannot be stored as-is in user labels, because they may contain characters that are not allowed in user-label values. */ userLabels?: {[key: string]: string} | null; /** * Read-only description of how the alerting policy is invalid. This field is only set when the alerting policy is invalid. An invalid alerting policy will not generate incidents. */ validity?: Schema$Status; } /** * Control over how the notification channels in notification_channels are notified when this alert fires. 
*/ export interface Schema$AlertStrategy { /** * If an alerting policy that was active has no data for this long, any open incidents will close */ autoClose?: string | null; /** * Control how notifications will be sent out, on a per-channel basis. */ notificationChannelStrategy?: Schema$NotificationChannelStrategy[]; /** * For log-based alert policies, the notification prompts is always OPENED. For non log-based alert policies, the notification prompts can be OPENED or OPENED, CLOSED. */ notificationPrompts?: string[] | null; /** * Required for log-based alerting policies, i.e. policies with a LogMatch condition.This limit is not implemented for alerting policies that do not have a LogMatch condition. */ notificationRateLimit?: Schema$NotificationRateLimit; } /** * App Engine service. Learn more at https://cloud.google.com/appengine. */ export interface Schema$AppEngine { /** * The ID of the App Engine module underlying this service. Corresponds to the module_id resource label in the gae_app monitored resource (https://cloud.google.com/monitoring/api/resources#tag_gae_app). */ moduleId?: string | null; } /** * Future parameters for the availability SLI. */ export interface Schema$AvailabilityCriteria {} /** * The authentication parameters to provide to the specified resource or URL that requires a username and password. Currently, only Basic HTTP authentication (https://tools.ietf.org/html/rfc7617) is supported in Uptime checks. */ export interface Schema$BasicAuthentication { /** * The password to use when authenticating with the HTTP server. */ password?: string | null; /** * The username to use when authenticating with the HTTP server. */ username?: string | null; } /** * A well-known service type, defined by its service type and service labels. Documentation and examples here (https://cloud.google.com/stackdriver/docs/solutions/slo-monitoring/api/api-structures#basic-svc-w-basic-sli). 
*/ export interface Schema$BasicService { /** * Labels that specify the resource that emits the monitoring data which is used for SLO reporting of this Service. Documentation and valid values for given service types here (https://cloud.google.com/stackdriver/docs/solutions/slo-monitoring/api/api-structures#basic-svc-w-basic-sli). */ serviceLabels?: {[key: string]: string} | null; /** * The type of service that this basic service defines, e.g. APP_ENGINE service type. Documentation and valid values here (https://cloud.google.com/stackdriver/docs/solutions/slo-monitoring/api/api-structures#basic-svc-w-basic-sli). */ serviceType?: string | null; } /** * An SLI measuring performance on a well-known service type. Performance will be computed on the basis of pre-defined metrics. The type of the service_resource determines the metrics to use and the service_resource.labels and metric_labels are used to construct a monitoring filter to filter that metric down to just the data relevant to this service. */ export interface Schema$BasicSli { /** * Good service is defined to be the count of requests made to this service that return successfully. */ availability?: Schema$AvailabilityCriteria; /** * Good service is defined to be the count of requests made to this service that are fast enough with respect to latency.threshold. */ latency?: Schema$LatencyCriteria; /** * OPTIONAL: The set of locations to which this SLI is relevant. Telemetry from other locations will not be used to calculate performance for this SLI. If omitted, this SLI applies to all locations in which the Service has activity. For service types that don't support breaking down by location, setting this field will result in an error. */ location?: string[] | null; /** * OPTIONAL: The set of RPCs to which this SLI is relevant. Telemetry from other methods will not be used to calculate performance for this SLI. If omitted, this SLI applies to all the Service's methods. 
For service types that don't support breaking down by method, setting this field will result in an error. */ method?: string[] | null; /** * OPTIONAL: The set of API versions to which this SLI is relevant. Telemetry from other API versions will not be used to calculate performance for this SLI. If omitted, this SLI applies to all API versions. For service types that don't support breaking down by version, setting this field will result in an error. */ version?: string[] | null; } /** * A test that uses an alerting result in a boolean column produced by the SQL query. */ export interface Schema$BooleanTest { /** * Required. The name of the column containing the boolean value. If the value in a row is NULL, that row is ignored. */ column?: string | null; } /** * BucketOptions describes the bucket boundaries used to create a histogram for the distribution. The buckets can be in a linear sequence, an exponential sequence, or each bucket can be specified explicitly. BucketOptions does not include the number of values in each bucket.A bucket has an inclusive lower bound and exclusive upper bound for the values that are counted for that bucket. The upper bound of a bucket must be strictly greater than the lower bound. The sequence of N buckets for a distribution consists of an underflow bucket (number 0), zero or more finite buckets (number 1 through N - 2) and an overflow bucket (number N - 1). The buckets are contiguous: the lower bound of bucket i (i \> 0) is the same as the upper bound of bucket i - 1. The buckets span the whole range of finite values: lower bound of the underflow bucket is -infinity and the upper bound of the overflow bucket is +infinity. The finite buckets are so-called because both bounds are finite. */ export interface Schema$BucketOptions { /** * The explicit buckets. */ explicitBuckets?: Schema$Explicit; /** * The exponential buckets. */ exponentialBuckets?: Schema$Exponential; /** * The linear bucket. 
*/ linearBuckets?: Schema$Linear; } /** * Cloud Endpoints service. Learn more at https://cloud.google.com/endpoints. */ export interface Schema$CloudEndpoints { /** * The name of the Cloud Endpoints service underlying this service. Corresponds to the service resource label in the api monitored resource (https://cloud.google.com/monitoring/api/resources#tag_api). */ service?: string | null; } /** * A Synthetic Monitor deployed to a Cloud Functions V2 instance. */ export interface Schema$CloudFunctionV2Target { /** * Output only. The cloud_run_revision Monitored Resource associated with the GCFv2. The Synthetic Monitor execution results (metrics, logs, and spans) are reported against this Monitored Resource. This field is output only. */ cloudRunRevision?: Schema$MonitoredResource; /** * Required. Fully qualified GCFv2 resource name i.e. projects/{project\}/locations/{location\}/functions/{function\} Required. */ name?: string | null; } /** * Cloud Run service. Learn more at https://cloud.google.com/run. */ export interface Schema$CloudRun { /** * The location the service is run. Corresponds to the location resource label in the cloud_run_revision monitored resource (https://cloud.google.com/monitoring/api/resources#tag_cloud_run_revision). */ location?: string | null; /** * The name of the Cloud Run service. Corresponds to the service_name resource label in the cloud_run_revision monitored resource (https://cloud.google.com/monitoring/api/resources#tag_cloud_run_revision). */ serviceName?: string | null; } /** * Istio service scoped to a single Kubernetes cluster. Learn more at https://istio.io. Clusters running OSS Istio will have their services ingested as this type. */ export interface Schema$ClusterIstio { /** * The name of the Kubernetes cluster in which this Istio service is defined. Corresponds to the cluster_name resource label in k8s_cluster resources. 
*/ clusterName?: string | null; /** * The location of the Kubernetes cluster in which this Istio service is defined. Corresponds to the location resource label in k8s_cluster resources. */ location?: string | null; /** * The name of the Istio service underlying this service. Corresponds to the destination_service_name metric label in Istio metrics. */ serviceName?: string | null; /** * The namespace of the Istio service underlying this service. Corresponds to the destination_service_namespace metric label in Istio metrics. */ serviceNamespace?: string | null; } /** * A collection of data points sent from a collectd-based plugin. See the collectd documentation for more information. */ export interface Schema$CollectdPayload { /** * The end time of the interval. */ endTime?: string | null; /** * The measurement metadata. Example: "process_id" -\> 12345 */ metadata?: {[key: string]: Schema$TypedValue} | null; /** * The name of the plugin. Example: "disk". */ plugin?: string | null; /** * The instance name of the plugin Example: "hdcl". */ pluginInstance?: string | null; /** * The start time of the interval. */ startTime?: string | null; /** * The measurement type. Example: "memory". */ type?: string | null; /** * The measurement type instance. Example: "used". */ typeInstance?: string | null; /** * The measured values during this time interval. Each value must have a different data_source_name. */ values?: Schema$CollectdValue[]; } /** * Describes the error status for payloads that were not written. */ export interface Schema$CollectdPayloadError { /** * Records the error status for the payload. If this field is present, the partial errors for nested values won't be populated. */ error?: Schema$Status; /** * The zero-based index in CreateCollectdTimeSeriesRequest.collectd_payloads. */ index?: number | null; /** * Records the error status for values that were not written due to an error.Failed payloads for which nothing is written will not include partial value errors. 
*/ valueErrors?: Schema$CollectdValueError[]; } /** * A single data point from a collectd-based plugin. */ export interface Schema$CollectdValue { /** * The data source for the collectd value. For example, there are two data sources for network measurements: "rx" and "tx". */ dataSourceName?: string | null; /** * The type of measurement. */ dataSourceType?: string | null; /** * The measurement value. */ value?: Schema$TypedValue; } /** * Describes the error status for values that were not written. */ export interface Schema$CollectdValueError { /** * Records the error status for the value. */ error?: Schema$Status; /** * The zero-based index in CollectdPayload.values within the parent CreateCollectdTimeSeriesRequest.collectd_payloads. */ index?: number | null; } /** * A condition is a true/false test that determines when an alerting policy should open an incident. If a condition evaluates to true, it signifies that something is wrong. */ export interface Schema$Condition { /** * A condition that checks that a time series continues to receive new data points. */ conditionAbsent?: Schema$MetricAbsence; /** * A condition that checks for log messages matching given constraints. If set, no other conditions can be present. */ conditionMatchedLog?: Schema$LogMatch; /** * A condition that uses the Monitoring Query Language to define alerts. */ conditionMonitoringQueryLanguage?: Schema$MonitoringQueryLanguageCondition; /** * A condition that uses the Prometheus query language to define alerts. */ conditionPrometheusQueryLanguage?: Schema$PrometheusQueryLanguageCondition; /** * A condition that periodically evaluates a SQL query result. */ conditionSql?: Schema$SqlCondition; /** * A condition that compares a time series against a threshold. */ conditionThreshold?: Schema$MetricThreshold; /** * A short name or phrase used to identify the condition in dashboards, notifications, and incidents. 
To avoid confusion, don't use the same display name for multiple conditions in the same policy. */ displayName?: string | null; /** * Required if the condition exists. The unique resource name for this condition. Its format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[POLICY_ID]/conditions/[CONDITION_ID] [CONDITION_ID] is assigned by Cloud Monitoring when the condition is created as part of a new or updated alerting policy.When calling the alertPolicies.create method, do not include the name field in the conditions of the requested alerting policy. Cloud Monitoring creates the condition identifiers and includes them in the new policy.When calling the alertPolicies.update method to update a policy, including a condition name causes the existing condition to be updated. Conditions without names are added to the updated policy. Existing conditions are deleted if they are not updated.Best practice is to preserve [CONDITION_ID] if you make only small changes, such as those to condition thresholds, durations, or trigger values. Otherwise, treat the change as a new condition and let the existing condition be deleted. */ name?: string | null; } /** * Optional. Used to perform content matching. This allows matching based on substrings and regular expressions, together with their negations. Only the first 4 MB of an HTTP or HTTPS check's response (and the first 1 MB of a TCP check's response) are examined for purposes of content matching. */ export interface Schema$ContentMatcher { /** * String, regex or JSON content to match. Maximum 1024 bytes. An empty content string indicates no content matching is to be performed. */ content?: string | null; /** * Matcher information for MATCHES_JSON_PATH and NOT_MATCHES_JSON_PATH */ jsonPathMatcher?: Schema$JsonPathMatcher; /** * The type of content matcher that will be applied to the server output, compared to the content string when the check is run. */ matcher?: string | null; } /** * The CreateCollectdTimeSeries request. 
*/ export interface Schema$CreateCollectdTimeSeriesRequest { /** * The collectd payloads representing the time series data. You must not include more than a single point for each time series, so no two payloads can have the same values for all of the fields plugin, plugin_instance, type, and type_instance. */ collectdPayloads?: Schema$CollectdPayload[]; /** * The version of collectd that collected the data. Example: "5.3.0-192.el6". */ collectdVersion?: string | null; /** * The monitored resource associated with the time series. */ resource?: Schema$MonitoredResource; } /** * The CreateCollectdTimeSeries response. */ export interface Schema$CreateCollectdTimeSeriesResponse { /** * Records the error status for points that were not written due to an error in the request.Failed requests for which nothing is written will return an error response instead. Requests where data points were rejected by the backend will set summary instead. */ payloadErrors?: Schema$CollectdPayloadError[]; /** * Aggregate statistics from writing the payloads. This field is omitted if all points were successfully written, so that the response is empty. This is for backwards compatibility with clients that log errors on any non-empty response. */ summary?: Schema$CreateTimeSeriesSummary; } /** * The CreateTimeSeries request. */ export interface Schema$CreateTimeSeriesRequest { /** * Required. The new data to be added to a list of time series. Adds at most one data point to each of several time series. The new data point must be more recent than any other point in its time series. Each TimeSeries value must fully specify a unique time series by supplying all label values for the metric and the monitored resource.The maximum number of TimeSeries objects per Create request is 200. */ timeSeries?: Schema$TimeSeries[]; } /** * Summary of the result of a failed request to write data to a time series. 
*/ export interface Schema$CreateTimeSeriesSummary { /** * The number of points that failed to be written. Order is not guaranteed. */ errors?: Schema$Error[]; /** * The number of points that were successfully written. */ successPointCount?: number | null; /** * The number of points in the request. */ totalPointCount?: number | null; } /** * Criteria specific to the AlertPolicys that this Snooze applies to. The Snooze will suppress alerts that come from one of the AlertPolicys whose names are supplied. */ export interface Schema$Criteria { /** * Optional. When you define a snooze, you can also define a filter for that snooze. The filter is a string containing one or more key-value pairs. The string uses the standard https://google.aip.dev/160 filter syntax. If you define a filter for a snooze, then the snooze can only apply to one alert policy. When the snooze is active, incidents won't be created when the incident would have key-value pairs (labels) that match those specified by the filter in the snooze.Snooze filters support resource, metric, and metadata labels. If multiple labels are used, then they must be connected with an AND operator. For example, the following filter applies the snooze to incidents that have a resource label with an instance ID of 1234567890, a metric label with an instance name of test_group, a metadata user label with a key of foo and a value of bar, and a metadata system label with a key of region and a value of us-central1: "filter": "resource.labels.instance_id=\"1234567890\" AND metric.labels.instance_name=\"test_group\" AND metadata.user_labels.foo=\"bar\" AND metadata.system_labels.region=\"us-central1\"" */ filter?: string | null; /** * The specific AlertPolicy names for the alert that should be snoozed. The format is: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[POLICY_ID] There is a limit of 16 policies per snooze. This limit is checked during snooze creation. 
Exactly 1 alert policy is required if filter is specified at the same time. */ policies?: string[] | null; } /** * Use a custom service to designate a service that you want to monitor when none of the other service types (like App Engine, Cloud Run, or a GKE type) matches your intended service. */ export interface Schema$Custom {} /** * Used to schedule the query to run every so many days. */ export interface Schema$Daily { /** * Optional. The time of day (in UTC) at which the query should run. If left unspecified, the server picks an arbitrary time of day and runs the query at the same time each day. */ executionTime?: Schema$TimeOfDay; /** * Required. The number of days between runs. Must be greater than or equal to 1 day and less than or equal to 31 days. */ periodicity?: number | null; } /** * Distribution contains summary statistics for a population of values. It optionally contains a histogram representing the distribution of those values across a set of buckets.The summary statistics are the count, mean, sum of the squared deviation from the mean, the minimum, and the maximum of the set of population of values. The histogram is based on a sequence of buckets and gives a count of values that fall into each bucket. The boundaries of the buckets are given either explicitly or by formulas for buckets of fixed or exponentially increasing widths.Although it is not forbidden, it is generally a bad idea to include non-finite values (infinities or NaNs) in the population of values, as this will render the mean and sum_of_squared_deviation fields meaningless. */ export interface Schema$Distribution { /** * Required in the Cloud Monitoring API v3. The values for each bucket specified in bucket_options. The sum of the values in bucketCounts must equal the value in the count field of the Distribution object. The order of the bucket counts follows the numbering schemes described for the three bucket types. 
The underflow bucket has number 0; the finite buckets, if any, have numbers 1 through N-2; and the overflow bucket has number N-1. The size of bucket_counts must not be greater than N. If the size is less than N, then the remaining buckets are assigned values of zero. */ bucketCounts?: string[] | null; /** * Required in the Cloud Monitoring API v3. Defines the histogram bucket boundaries. */ bucketOptions?: Schema$BucketOptions; /** * The number of values in the population. Must be non-negative. This value must equal the sum of the values in bucket_counts if a histogram is provided. */ count?: string | null; /** * Must be in increasing order of value field. */ exemplars?: Schema$Exemplar[]; /** * The arithmetic mean of the values in the population. If count is zero then this field must be zero. */ mean?: number | null; /** * If specified, contains the range of the population values. The field must not be present if the count is zero. This field is presently ignored by the Cloud Monitoring API v3. */ range?: Schema$Range; /** * The sum of squared deviations from the mean of the values in the population. For values x_i this is: Sum[i=1..n]((x_i - mean)^2) Knuth, "The Art of Computer Programming", Vol. 2, page 232, 3rd edition describes Welford's method for accumulating this sum in one pass.If count is zero then this field must be zero. */ sumOfSquaredDeviation?: number | null; } /** * A DistributionCut defines a TimeSeries and thresholds used for measuring good service and total service. The TimeSeries must have ValueType = DISTRIBUTION and MetricKind = DELTA or MetricKind = CUMULATIVE. The computed good_service will be the estimated count of values in the Distribution that fall within the specified min and max. */ export interface Schema$DistributionCut { /** * A monitoring filter (https://cloud.google.com/monitoring/api/v3/filters) specifying a TimeSeries aggregating values. Must have ValueType = DISTRIBUTION and MetricKind = DELTA or MetricKind = CUMULATIVE. 
*/ distributionFilter?: string | null; /** * Range of values considered "good." For a one-sided range, set one bound to an infinite value. */ range?: Schema$GoogleMonitoringV3Range; } /** * Documentation that is included in the notifications and incidents pertaining to this policy. */ export interface Schema$Documentation { /** * The body of the documentation, interpreted according to mime_type. The content may not exceed 8,192 Unicode characters and may not exceed more than 10,240 bytes when encoded in UTF-8 format, whichever is smaller. This text can be templatized by using variables (https://cloud.google.com/monitoring/alerts/doc-variables#doc-vars). */ content?: string | null; /** * Optional. Links to content such as playbooks, repositories, and other resources. This field can contain up to 3 entries. */ links?: Schema$Link[]; /** * The format of the content field. Presently, only the value "text/markdown" is supported. See Markdown (https://en.wikipedia.org/wiki/Markdown) for more information. */ mimeType?: string | null; /** * Optional. The subject line of the notification. The subject line may not exceed 10,240 bytes. In notifications generated by this policy, the contents of the subject line after variable expansion will be truncated to 255 bytes or shorter at the latest UTF-8 character boundary. The 255-byte limit is recommended by this thread (https://stackoverflow.com/questions/1592291/what-is-the-email-subject-length-limit). It is both the limit imposed by some third-party ticketing products and it is common to define textual fields in databases as VARCHAR(255).The contents of the subject line can be templatized by using variables (https://cloud.google.com/monitoring/alerts/doc-variables#doc-vars). If this field is missing or empty, a default subject line will be generated. 
*/ subject?: string | null; } /** * A set of (label, value) pairs that were removed from a Distribution time series during aggregation and then added as an attachment to a Distribution.Exemplar.The full label set for the exemplars is constructed by using the dropped pairs in combination with the label values that remain on the aggregated Distribution time series. The constructed full label set can be used to identify the specific entity, such as the instance or job, which might be contributing to a long-tail. However, with dropped labels, the storage requirements are reduced because only the aggregated distribution values for a large group of time series are stored.Note that there are no guarantees on ordering of the labels from exemplar-to-exemplar and from distribution-to-distribution in the same stream, and there may be duplicates. It is up to clients to resolve any ambiguities. */ export interface Schema$DroppedLabels { /** * Map from label to its value, for all labels dropped in any aggregation. */ label?: {[key: string]: string} | null; } /** * A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); \} */ export interface Schema$Empty {} /** * Detailed information about an error category. */ export interface Schema$Error { /** * The number of points that couldn't be written because of status. */ pointCount?: number | null; /** * The status of the requested write operation. */ status?: Schema$Status; } /** * Exemplars are example points that may be used to annotate aggregated distribution values. They are metadata that gives information about a particular value added to a Distribution bucket, such as a trace ID that was active when a value was added. They may contain further information, such as example values and timestamps, origin, etc. 
*/ export interface Schema$Exemplar { /** * Contextual information about the example value. Examples are:Trace: type.googleapis.com/google.monitoring.v3.SpanContextLiteral string: type.googleapis.com/google.protobuf.StringValueLabels dropped during aggregation: type.googleapis.com/google.monitoring.v3.DroppedLabelsThere may be only a single attachment of any given message type in a single exemplar, and this is enforced by the system. */ attachments?: Array<{[key: string]: any}> | null; /** * The observation (sampling) time of the above value. */ timestamp?: string | null; /** * Value of the exemplar point. This value determines to which bucket the exemplar belongs. */ value?: number | null; } /** * Specifies a set of buckets with arbitrary widths.There are size(bounds) + 1 (= N) buckets. Bucket i has the following boundaries:Upper bound (0 <= i < N-1): bounds[i].Lower bound (1 <= i < N): bounds[i - 1].The bounds field must contain at least one element. If bounds has only one element, then there are no finite buckets, and that single element is the common boundary of the overflow and underflow buckets. */ export interface Schema$Explicit { /** * The values must be monotonically increasing. */ bounds?: number[] | null; } /** * Specifies an exponential sequence of buckets that have a width that is proportional to the value of the lower bound. Each bucket represents a constant relative uncertainty on a specific value in the bucket.There are num_finite_buckets + 2 (= N) buckets. Bucket i has the following boundaries:Upper bound (0 <= i < N-1): scale * (growth_factor ^ i).Lower bound (1 <= i < N): scale * (growth_factor ^ (i - 1)). */ export interface Schema$Exponential { /** * Must be greater than 1. */ growthFactor?: number | null; /** * Must be greater than 0. */ numFiniteBuckets?: number | null; /** * Must be greater than 0. 
*/ scale?: number | null; } /** * A single field of a message type.New usages of this message as an alternative to FieldDescriptorProto are strongly discouraged. This message does not reliably preserve all information necessary to model the schema and preserve semantics. Instead make use of FileDescriptorSet which preserves the necessary information. */ export interface Schema$Field { /** * The field cardinality. */ cardinality?: string | null; /** * The string value of the default value of this field. Proto2 syntax only. */ defaultValue?: string | null; /** * The field JSON name. */ jsonName?: string | null; /** * The field type. */ kind?: string | null; /** * The field name. */ name?: string | null; /** * The field number. */ number?: number | null; /** * The index of the field type in Type.oneofs, for message or enumeration types. The first type has index 1; zero means the type is not in the list. */ oneofIndex?: number | null; /** * The protocol buffer options. */ options?: Schema$Option[]; /** * Whether to use alternative packed wire representation. */ packed?: boolean | null; /** * The field type URL, without the scheme, for message or enumeration types. Example: "type.googleapis.com/google.protobuf.Timestamp". */ typeUrl?: string | null; } /** * Options used when forecasting the time series and testing the predicted value against the threshold. */ export interface Schema$ForecastOptions { /** * Required. The length of time into the future to forecast whether a time series will violate the threshold. If the predicted value is found to violate the threshold, and the violation is observed in all forecasts made for the configured duration, then the time series is considered to be failing. The forecast horizon can range from 1 hour to 60 hours. */ forecastHorizon?: string | null; } /** * The GetNotificationChannelVerificationCode request. */ export interface Schema$GetNotificationChannelVerificationCodeRequest { /** * The desired expiration time. 
If specified, the API will guarantee that the returned code will not be valid after the specified timestamp; however, the API cannot guarantee that the returned code will be valid for at least as long as the requested time (the API puts an upper bound on the amount of time for which a code may be valid). If omitted, a default expiration will be used, which may be less than the max permissible expiration (so specifying an expiration may extend the code's lifetime over omitting an expiration, even though the API does impose an upper limit on the maximum expiration that is permitted). */ expireTime?: string | null; } /** * The GetNotificationChannelVerificationCode response. */ export interface Schema$GetNotificationChannelVerificationCodeResponse { /** * The verification code, which may be used to verify other channels that have an equivalent identity (i.e. other channels of the same type with the same fingerprint such as other email channels with the same email address or other sms channels with the same number). */ code?: string | null; /** * The expiration time associated with the code that was returned. If an expiration was provided in the request, this is the minimum of the requested expiration in the request and the max permitted expiration. */ expireTime?: string | null; } /** * GKE Namespace. The field names correspond to the resource metadata labels on monitored resources that fall under a namespace (for example, k8s_container or k8s_pod). */ export interface Schema$GkeNamespace { /** * The name of the parent cluster. */ clusterName?: string | null; /** * The location of the parent cluster. This may be a zone or region. */ location?: string | null; /** * The name of this namespace. */ namespaceName?: string | null; /** * Output only. The project this resource lives in. For legacy services migrated from the Custom type, this may be a distinct project from the one parenting the service itself. */ projectId?: string | null; } /** * GKE Service. 
The "service" here represents a Kubernetes service object (https://kubernetes.io/docs/concepts/services-networking/service). The field names correspond to the resource labels on k8s_service monitored resources (https://cloud.google.com/monitoring/api/resources#tag_k8s_service). */ export interface Schema$GkeService { /** * The name of the parent cluster. */ clusterName?: string | null; /** * The location of the parent cluster. This may be a zone or region. */ location?: string | null; /** * The name of the parent namespace. */ namespaceName?: string | null; /** * Output only. The project this resource lives in. For legacy services migrated from the Custom type, this may be a distinct project from the one parenting the service itself. */ projectId?: string | null; /** * The name of this service. */ serviceName?: string | null; } /** * A GKE Workload (Deployment, StatefulSet, etc). The field names correspond to the metadata labels on monitored resources that fall under a workload (for example, k8s_container or k8s_pod). */ export interface Schema$GkeWorkload { /** * The name of the parent cluster. */ clusterName?: string | null; /** * The location of the parent cluster. This may be a zone or region. */ location?: string | null; /** * The name of the parent namespace. */ namespaceName?: string | null; /** * Output only. The project this resource lives in. For legacy services migrated from the Custom type, this may be a distinct project from the one parenting the service itself. */ projectId?: string | null; /** * The name of this workload. */ topLevelControllerName?: string | null; /** * The type of this workload (for example, "Deployment" or "DaemonSet") */ topLevelControllerType?: string | null; } /** * Range of numerical values within min and max. */ export interface Schema$GoogleMonitoringV3Range { /** * Range maximum. */ max?: number | null; /** * Range minimum. */ min?: number | null; } /** * The description of a dynamic collection of monitored resources. 
Each group has a filter that is matched against monitored resources and their associated metadata. If a group's filter matches an available monitored resource, then that resource is a member of that group. Groups can contain any number of monitored resources, and each monitored resource can be a member of any number of groups.Groups can be nested in parent-child hierarchies. The parentName field identifies an optional parent for each group. If a group has a parent, then the only monitored re