// Source: UNPKG — package "googleapis" (Cloud Spanner `spanner_v1` type declarations).
// Version: unspecified in the captured page header; full file reported as 820 lines, 687 kB.
import { OAuth2Client, JWT, Compute, UserRefreshClient, BaseExternalAccountClient, GaxiosResponseWithHTTP2, GoogleConfigurable, MethodOptions, StreamMethodOptions, GlobalOptions, GoogleAuth, BodyResponseCallback, APIRequestContext } from 'googleapis-common'; import { Readable } from 'stream'; export declare namespace spanner_v1 { export interface Options extends GlobalOptions { version: 'v1'; } interface StandardParameters { /** * Auth client or API Key for the request */ auth?: string | OAuth2Client | JWT | Compute | UserRefreshClient | BaseExternalAccountClient | GoogleAuth; /** * V1 error format. */ '$.xgafv'?: string; /** * OAuth access token. */ access_token?: string; /** * Data format for response. */ alt?: string; /** * JSONP */ callback?: string; /** * Selector specifying which fields to include in a partial response. */ fields?: string; /** * API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */ key?: string; /** * OAuth 2.0 token for the current user. */ oauth_token?: string; /** * Returns response with indentations and line breaks. */ prettyPrint?: boolean; /** * Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */ quotaUser?: string; /** * Legacy upload protocol for media (e.g. "media", "multipart"). */ uploadType?: string; /** * Upload protocol for media (e.g. "raw", "multipart"). */ upload_protocol?: string; } /** * Cloud Spanner API * * Cloud Spanner is a managed, mission-critical, globally consistent and scalable relational database service. * * @example * ```js * const {google} = require('googleapis'); * const spanner = google.spanner('v1'); * ``` */ export class Spanner { context: APIRequestContext; projects: Resource$Projects; scans: Resource$Scans; constructor(options: GlobalOptions, google?: GoogleConfigurable); } /** * Arguments to ack operations. 
*/ export interface Schema$Ack { /** * By default, an attempt to ack a message that does not exist will fail with a `NOT_FOUND` error. With `ignore_not_found` set to true, the ack will succeed even if the message does not exist. This is useful for unconditionally acking a message, even if it is missing or has already been acked. */ ignoreNotFound?: boolean | null; /** * Required. The primary key of the message to be acked. */ key?: any[] | null; /** * Required. The queue where the message to be acked is stored. */ queue?: string | null; } /** * A session in the Cloud Spanner Adapter API. */ export interface Schema$AdapterSession { /** * Identifier. The name of the session. This is always system-assigned. */ name?: string | null; } /** * Message sent by the client to the adapter. */ export interface Schema$AdaptMessageRequest { /** * Optional. Opaque request state passed by the client to the server. */ attachments?: { [key: string]: string; } | null; /** * Optional. Uninterpreted bytes from the underlying wire protocol. */ payload?: string | null; /** * Required. Identifier for the underlying wire protocol. */ protocol?: string | null; } /** * Message sent by the adapter to the client. */ export interface Schema$AdaptMessageResponse { /** * Optional. Indicates whether this is the last AdaptMessageResponse in the stream. This field may be optionally set by the server. Clients should not rely on this field being set in all cases. */ last?: boolean | null; /** * Optional. Uninterpreted bytes from the underlying wire protocol. */ payload?: string | null; /** * Optional. Opaque state updates to be applied by the client. */ stateUpdates?: { [key: string]: string; } | null; } /** * The request for AddSplitPoints. */ export interface Schema$AddSplitPointsRequest { /** * Optional. A user-supplied tag associated with the split points. For example, "initial_data_load", "special_event_1". Defaults to "CloudAddSplitPointsAPI" if not specified. 
The length of the tag must not exceed 50 characters, or else it is trimmed. Only valid UTF8 characters are allowed. */ initiator?: string | null; /** * Required. The split points to add. */ splitPoints?: Schema$SplitPoints[]; } /** * The response for AddSplitPoints. */ export interface Schema$AddSplitPointsResponse { } /** * AsymmetricAutoscalingOption specifies the scaling of replicas identified by the given selection. */ export interface Schema$AsymmetricAutoscalingOption { /** * Optional. Overrides applied to the top-level autoscaling configuration for the selected replicas. */ overrides?: Schema$AutoscalingConfigOverrides; /** * Required. Selects the replicas to which this AsymmetricAutoscalingOption applies. Only read-only replicas are supported. */ replicaSelection?: Schema$InstanceReplicaSelection; } /** * Autoscaling configuration for an instance. */ export interface Schema$AutoscalingConfig { /** * Optional. Optional asymmetric autoscaling options. Replicas matching the replica selection criteria will be autoscaled independently from other replicas. The autoscaler will scale the replicas based on the utilization of replicas identified by the replica selection. Replica selections should not overlap with each other. Other replicas (those do not match any replica selection) will be autoscaled together and will have the same compute capacity allocated to them. */ asymmetricAutoscalingOptions?: Schema$AsymmetricAutoscalingOption[]; /** * Required. Autoscaling limits for an instance. */ autoscalingLimits?: Schema$AutoscalingLimits; /** * Required. The autoscaling targets for an instance. */ autoscalingTargets?: Schema$AutoscalingTargets; } /** * Overrides the top-level autoscaling configuration for the replicas identified by `replica_selection`. All fields in this message are optional. Any unspecified fields will use the corresponding values from the top-level autoscaling configuration. */ export interface Schema$AutoscalingConfigOverrides { /** * Optional. 
If specified, overrides the min/max limit in the top-level autoscaling configuration for the selected replicas. */ autoscalingLimits?: Schema$AutoscalingLimits; /** * Optional. If specified, overrides the autoscaling target high_priority_cpu_utilization_percent in the top-level autoscaling configuration for the selected replicas. */ autoscalingTargetHighPriorityCpuUtilizationPercent?: number | null; /** * Optional. If specified, overrides the autoscaling target `total_cpu_utilization_percent` in the top-level autoscaling configuration for the selected replicas. */ autoscalingTargetTotalCpuUtilizationPercent?: number | null; /** * Optional. If true, disables high priority CPU autoscaling for the selected replicas and ignores high_priority_cpu_utilization_percent in the top-level autoscaling configuration. When setting this field to true, setting autoscaling_target_high_priority_cpu_utilization_percent field to a non-zero value for the same replica is not supported. If false, the autoscaling_target_high_priority_cpu_utilization_percent field in the replica will be used if set to a non-zero value. Otherwise, the high_priority_cpu_utilization_percent field in the top-level autoscaling configuration will be used. Setting both disable_high_priority_cpu_autoscaling and disable_total_cpu_autoscaling to true for the same replica is not supported. */ disableHighPriorityCpuAutoscaling?: boolean | null; /** * Optional. If true, disables total CPU autoscaling for the selected replicas and ignores total_cpu_utilization_percent in the top-level autoscaling configuration. When setting this field to true, setting autoscaling_target_total_cpu_utilization_percent field to a non-zero value for the same replica is not supported. If false, the autoscaling_target_total_cpu_utilization_percent field in the replica will be used if set to a non-zero value. Otherwise, the total_cpu_utilization_percent field in the top-level autoscaling configuration will be used. 
Setting both disable_high_priority_cpu_autoscaling and disable_total_cpu_autoscaling to true for the same replica is not supported. */ disableTotalCpuAutoscaling?: boolean | null; } /** * The autoscaling limits for the instance. Users can define the minimum and maximum compute capacity allocated to the instance, and the autoscaler will only scale within that range. Users can either use nodes or processing units to specify the limits, but should use the same unit to set both the min_limit and max_limit. */ export interface Schema$AutoscalingLimits { /** * Maximum number of nodes allocated to the instance. If set, this number should be greater than or equal to min_nodes. */ maxNodes?: number | null; /** * Maximum number of processing units allocated to the instance. If set, this number should be multiples of 1000 and be greater than or equal to min_processing_units. */ maxProcessingUnits?: number | null; /** * Minimum number of nodes allocated to the instance. If set, this number should be greater than or equal to 1. */ minNodes?: number | null; /** * Minimum number of processing units allocated to the instance. If set, this number should be multiples of 1000. */ minProcessingUnits?: number | null; } /** * The autoscaling targets for an instance. */ export interface Schema$AutoscalingTargets { /** * Optional. The target high priority cpu utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 90] inclusive. If not specified or set to 0, the autoscaler skips scaling based on high priority CPU utilization. */ highPriorityCpuUtilizationPercent?: number | null; /** * Required. The target storage utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 99] inclusive. 
*/ storageUtilizationPercent?: number | null; /** * Optional. The target total CPU utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 90] inclusive. If not specified or set to 0, the autoscaler skips scaling based on total CPU utilization. If both `high_priority_cpu_utilization_percent` and `total_cpu_utilization_percent` are specified, the autoscaler provisions the larger of the two required compute capacities to satisfy both targets. */ totalCpuUtilizationPercent?: number | null; } /** * A backup of a Cloud Spanner database. */ export interface Schema$Backup { /** * Output only. List of backup schedule URIs that are associated with creating this backup. This is only applicable for scheduled backups, and is empty for on-demand backups. To optimize for storage, whenever possible, multiple schedules are collapsed together to create one backup. In such cases, this field captures the list of all backup schedule URIs that are associated with creating this backup. If collapsing is not done, then this field captures the single backup schedule URI associated with creating this backup. */ backupSchedules?: string[] | null; /** * Output only. The time the CreateBackup request is received. If the request does not specify `version_time`, the `version_time` of the backup will be equivalent to the `create_time`. */ createTime?: string | null; /** * Required for the CreateBackup operation. Name of the database from which this backup was created. This needs to be in the same instance as the backup. Values are of the form `projects//instances//databases/`. */ database?: string | null; /** * Output only. The database dialect information for the backup. */ databaseDialect?: string | null; /** * Output only. The encryption information for the backup. */ encryptionInfo?: Schema$EncryptionInfo; /** * Output only. 
The encryption information for the backup, whether it is protected by one or more KMS keys. The information includes all Cloud KMS key versions used to encrypt the backup. The `encryption_status` field inside of each `EncryptionInfo` is not populated. At least one of the key versions must be available for the backup to be restored. If a key version is revoked in the middle of a restore, the restore behavior is undefined. */ encryptionInformation?: Schema$EncryptionInfo[]; /** * Output only. For a backup in an incremental backup chain, this is the storage space needed to keep the data that has changed since the previous backup. For all other backups, this is always the size of the backup. This value may change if backups on the same chain get deleted or expired. This field can be used to calculate the total storage space used by a set of backups. For example, the total space used by all backups of a database can be computed by summing up this field. */ exclusiveSizeBytes?: string | null; /** * Required for the CreateBackup operation. The expiration time of the backup, with microseconds granularity that must be at least 6 hours and at most 366 days from the time the CreateBackup request is processed. Once the `expire_time` has passed, the backup is eligible to be automatically deleted by Cloud Spanner to free the resources used by the backup. */ expireTime?: string | null; /** * Output only. The number of bytes that will be freed by deleting this backup. This value will be zero if, for example, this backup is part of an incremental backup chain and younger backups in the chain require that we keep its data. For backups not in an incremental backup chain, this is always the size of the backup. This value may change if backups on the same chain get created, deleted or expired. */ freeableSizeBytes?: string | null; /** * Output only. Populated only for backups in an incremental backup chain. 
Backups share the same chain id if and only if they belong to the same incremental backup chain. Use this field to determine which backups are part of the same incremental backup chain. The ordering of backups in the chain can be determined by ordering the backup `version_time`. */ incrementalBackupChainId?: string | null; /** * Output only. The instance partition storing the backup. This is the same as the list of the instance partitions that the database recorded at the backup's `version_time`. */ instancePartitions?: Schema$BackupInstancePartition[]; /** * Output only. The max allowed expiration time of the backup, with microseconds granularity. A backup's expiration time can be configured in multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or copying an existing backup, the expiration time specified must be less than `Backup.max_expire_time`. */ maxExpireTime?: string | null; /** * Output only. The minimum edition required to successfully restore the backup. Populated only if the edition is Enterprise or Enterprise Plus. */ minimumRestorableEdition?: string | null; /** * Output only for the CreateBackup operation. Required for the UpdateBackup operation. A globally unique identifier for the backup which cannot be changed. Values are of the form `projects//instances//backups/a-z*[a-z0-9]` The final segment of the name must be between 2 and 60 characters in length. The backup is stored in the location(s) specified in the instance configuration of the instance containing the backup, identified by the prefix of the backup name of the form `projects//instances/`. */ name?: string | null; /** * Output only. Data deleted at a time older than this is guaranteed not to be retained in order to support this backup. For a backup in an incremental backup chain, this is the version time of the oldest backup that exists or ever existed in the chain. For all other backups, this is the version time of the backup. 
This field can be used to understand what data is being retained by the backup system. */ oldestVersionTime?: string | null; /** * Output only. The names of the destination backups being created by copying this source backup. The backup names are of the form `projects//instances//backups/`. Referencing backups may exist in different instances. The existence of any referencing backup prevents the backup from being deleted. When the copy operation is done (either successfully completed or cancelled or the destination backup is deleted), the reference to the backup is removed. */ referencingBackups?: string[] | null; /** * Output only. The names of the restored databases that reference the backup. The database names are of the form `projects//instances//databases/`. Referencing databases may exist in different instances. The existence of any referencing database prevents the backup from being deleted. When a restored database from the backup enters the `READY` state, the reference to the backup is removed. */ referencingDatabases?: string[] | null; /** * Output only. Size of the backup in bytes. For a backup in an incremental backup chain, this is the sum of the `exclusive_size_bytes` of itself and all older backups in the chain. */ sizeBytes?: string | null; /** * Output only. The current state of the backup. */ state?: string | null; /** * The backup will contain an externally consistent copy of the database at the timestamp specified by `version_time`. If `version_time` is not specified, the system will set `version_time` to the `create_time` of the backup. */ versionTime?: string | null; } /** * Information about a backup. */ export interface Schema$BackupInfo { /** * Name of the backup. */ backup?: string | null; /** * The time the CreateBackup request was received. */ createTime?: string | null; /** * Name of the database the backup was created from. 
*/ sourceDatabase?: string | null; /** * The backup contains an externally consistent copy of `source_database` at the timestamp specified by `version_time`. If the CreateBackup request did not specify `version_time`, the `version_time` of the backup is equivalent to the `create_time`. */ versionTime?: string | null; } /** * Instance partition information for the backup. */ export interface Schema$BackupInstancePartition { /** * A unique identifier for the instance partition. Values are of the form `projects//instances//instancePartitions/` */ instancePartition?: string | null; } /** * BackupSchedule expresses the automated backup creation specification for a Spanner database. */ export interface Schema$BackupSchedule { /** * Optional. The encryption configuration that is used to encrypt the backup. If this field is not specified, the backup uses the same encryption configuration as the database. */ encryptionConfig?: Schema$CreateBackupEncryptionConfig; /** * The schedule creates only full backups. */ fullBackupSpec?: Schema$FullBackupSpec; /** * The schedule creates incremental backup chains. */ incrementalBackupSpec?: Schema$IncrementalBackupSpec; /** * Identifier. Output only for the CreateBackupSchedule operation. Required for the UpdateBackupSchedule operation. A globally unique identifier for the backup schedule which cannot be changed. Values are of the form `projects//instances//databases//backupSchedules/a-z*[a-z0-9]` The final segment of the name must be between 2 and 60 characters in length. */ name?: string | null; /** * Optional. The retention duration of a backup that must be at least 6 hours and at most 366 days. The backup is eligible to be automatically deleted once the retention period has elapsed. */ retentionDuration?: string | null; /** * Optional. The schedule specification based on which the backup creations are triggered. */ spec?: Schema$BackupScheduleSpec; /** * Output only. The timestamp at which the schedule was last updated. 
If the schedule has never been updated, this field contains the timestamp when the schedule was first created. */ updateTime?: string | null; } /** * Defines specifications of the backup schedule. */ export interface Schema$BackupScheduleSpec { /** * Cron style schedule specification. */ cronSpec?: Schema$CrontabSpec; } /** * The request for BatchCreateSessions. */ export interface Schema$BatchCreateSessionsRequest { /** * Required. The number of sessions to be created in this batch call. At least one session is created. The API can return fewer than the requested number of sessions. If a specific number of sessions are desired, the client can make additional calls to `BatchCreateSessions` (adjusting session_count as necessary). */ sessionCount?: number | null; /** * Parameters to apply to each created session. */ sessionTemplate?: Schema$Session; } /** * The response for BatchCreateSessions. */ export interface Schema$BatchCreateSessionsResponse { /** * The freshly created sessions. */ session?: Schema$Session[]; } /** * The request for BatchWrite. */ export interface Schema$BatchWriteRequest { /** * Optional. If you don't set the `exclude_txn_from_change_streams` option or if it's set to `false`, then any change streams monitoring columns modified by transactions will capture the updates made within that transaction. */ excludeTxnFromChangeStreams?: boolean | null; /** * Required. The groups of mutations to be applied. */ mutationGroups?: Schema$MutationGroup[]; /** * Common options for this request. */ requestOptions?: Schema$RequestOptions; } /** * The result of applying a batch of mutations. */ export interface Schema$BatchWriteResponse { /** * The commit timestamp of the transaction that applied this batch. Present if `status` is `OK`, absent otherwise. */ commitTimestamp?: string | null; /** * The mutation groups applied in this batch. The values index into the `mutation_groups` field in the corresponding `BatchWriteRequest`. 
*/ indexes?: number[] | null; /** * An `OK` status indicates success. Any other status indicates a failure. */ status?: Schema$Status; } /** * The request for BeginTransaction. */ export interface Schema$BeginTransactionRequest { /** * Optional. Required for read-write transactions on a multiplexed session that commit mutations but don't perform any reads or queries. You must randomly select one of the mutations from the mutation set and send it as a part of this request. */ mutationKey?: Schema$Mutation; /** * Required. Options for the new transaction. */ options?: Schema$TransactionOptions; /** * Common options for this request. Priority is ignored for this request. Setting the priority in this `request_options` struct doesn't do anything. To set the priority for a transaction, set it on the reads and writes that are part of this transaction instead. */ requestOptions?: Schema$RequestOptions; } /** * Associates `members`, or principals, with a `role`. */ export interface Schema$Binding { /** * The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). */ condition?: Schema$Expr; /** * Specifies the principals requesting access for a Google Cloud resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. 
Does not include identities that come from external identity providers (IdPs) through identity federation. * `user:{emailid\}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid\}`: An email address that represents a Google service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `serviceAccount:{projectid\}.svc.id.goog[{namespace\}/{kubernetes-sa\}]`: An identifier for a [Kubernetes service account](https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`. * `group:{emailid\}`: An email address that represents a Google group. For example, `admins@example.com`. * `domain:{domain\}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. * `principal://iam.googleapis.com/locations/global/workforcePools/{pool_id\}/subject/{subject_attribute_value\}`: A single identity in a workforce identity pool. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id\}/group/{group_id\}`: All workforce identities in a group. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id\}/attribute.{attribute_name\}/{attribute_value\}`: All workforce identities with a specific attribute value. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id\}/x`: All identities in a workforce identity pool. * `principal://iam.googleapis.com/projects/{project_number\}/locations/global/workloadIdentityPools/{pool_id\}/subject/{subject_attribute_value\}`: A single identity in a workload identity pool. * `principalSet://iam.googleapis.com/projects/{project_number\}/locations/global/workloadIdentityPools/{pool_id\}/group/{group_id\}`: A workload identity pool group. 
* `principalSet://iam.googleapis.com/projects/{project_number\}/locations/global/workloadIdentityPools/{pool_id\}/attribute.{attribute_name\}/{attribute_value\}`: All identities in a workload identity pool with a certain attribute. * `principalSet://iam.googleapis.com/projects/{project_number\}/locations/global/workloadIdentityPools/{pool_id\}/x`: All identities in a workload identity pool. * `deleted:user:{emailid\}?uid={uniqueid\}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid\}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid\}?uid={uniqueid\}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid\}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid\}?uid={uniqueid\}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid\}` and the recovered group retains the role in the binding. * `deleted:principal://iam.googleapis.com/locations/global/workforcePools/{pool_id\}/subject/{subject_attribute_value\}`: Deleted single identity in a workforce identity pool. For example, `deleted:principal://iam.googleapis.com/locations/global/workforcePools/my-pool-id/subject/my-subject-attribute-value`. */ members?: string[] | null; /** * Role that is assigned to the list of `members`, or principals. For example, `roles/viewer`, `roles/editor`, or `roles/owner`. 
For an overview of the IAM roles and permissions, see the [IAM documentation](https://cloud.google.com/iam/docs/roles-overview). For a list of the available pre-defined roles, see [here](https://cloud.google.com/iam/docs/understanding-roles). */ role?: string | null; } /** * Metadata type for the long-running operation returned by ChangeQuorum. */ export interface Schema$ChangeQuorumMetadata { /** * If set, the time at which this operation failed or was completed successfully. */ endTime?: string | null; /** * The request for ChangeQuorum. */ request?: Schema$ChangeQuorumRequest; /** * Time the request was received. */ startTime?: string | null; } /** * The request for ChangeQuorum. */ export interface Schema$ChangeQuorumRequest { /** * Optional. The etag is the hash of the `QuorumInfo`. The `ChangeQuorum` operation is only performed if the etag matches that of the `QuorumInfo` in the current database resource. Otherwise the API returns an `ABORTED` error. The etag is used for optimistic concurrency control as a way to help prevent simultaneous change quorum requests that could create a race condition. */ etag?: string | null; /** * Required. Name of the database in which to apply `ChangeQuorum`. Values are of the form `projects//instances//databases/`. */ name?: string | null; /** * Required. The type of this quorum. */ quorumType?: Schema$QuorumType; } /** * Spanner Change Streams enable customers to capture and stream out changes to their Spanner databases in real-time. A change stream can be created with option partition_mode='IMMUTABLE_KEY_RANGE' or partition_mode='MUTABLE_KEY_RANGE'. This message is only used in Change Streams created with the option partition_mode='MUTABLE_KEY_RANGE'. Spanner automatically creates a special Table-Valued Function (TVF) along with each Change Streams. The function provides access to the change stream's records. 
The function is named READ_<change_stream_name> (where <change_stream_name> is the name of the change stream), and it returns a table with only one column called ChangeRecord.
     */
    export interface Schema$ChangeStreamRecord {
        /**
         * Data change record describing a data change for a change stream partition.
         */
        dataChangeRecord?: Schema$DataChangeRecord;
        /**
         * Heartbeat record describing a heartbeat for a change stream partition.
         */
        heartbeatRecord?: Schema$HeartbeatRecord;
        /**
         * Partition end record describing a terminated change stream partition.
         */
        partitionEndRecord?: Schema$PartitionEndRecord;
        /**
         * Partition event record describing key range changes for a change stream partition.
         */
        partitionEventRecord?: Schema$PartitionEventRecord;
        /**
         * Partition start record describing a new change stream partition.
         */
        partitionStartRecord?: Schema$PartitionStartRecord;
    }
    /**
     * Metadata associated with a parent-child relationship appearing in a PlanNode.
     */
    export interface Schema$ChildLink {
        /**
         * The node to which the link points.
         */
        childIndex?: number | null;
        /**
         * The type of the link. For example, in Hash Joins this could be used to distinguish between the build child and the probe child, or in the case of the child being an output variable, to represent the tag associated with the output variable.
         */
        type?: string | null;
        /**
         * Only present if the child node is SCALAR and corresponds to an output variable of the parent node. The field carries the name of the output variable. For example, a `TableScan` operator that reads rows from a table will have child links to the `SCALAR` nodes representing the output variables created for each column that is read by the operator. The corresponding `variable` fields will be set to the variable names assigned to the columns.
         */
        variable?: string | null;
    }
    /**
     * Container for various pieces of client-owned context attached to a request.
     */
    export interface Schema$ClientContext {
        /**
         * Optional. Map of parameter name to value for this request. These values will be returned by any SECURE_CONTEXT() calls invoked by this request (e.g., by queries against Parameterized Secure Views).
         */
        secureContext?: {
            [key: string]: any;
        } | null;
    }
    /**
     * Metadata for a column.
     */
    export interface Schema$ColumnMetadata {
        /**
         * Indicates whether the column is a primary key column.
         */
        isPrimaryKey?: boolean | null;
        /**
         * Name of the column.
         */
        name?: string | null;
        /**
         * Ordinal position of the column based on the original table definition in the schema starting with a value of 1.
         */
        ordinalPosition?: string | null;
        /**
         * Type of the column.
         */
        type?: Schema$Type;
    }
    /**
     * The request for Commit.
     */
    export interface Schema$CommitRequest {
        /**
         * Optional. The amount of latency this request is configured to incur in order to improve throughput. If this field isn't set, Spanner assumes requests are relatively latency sensitive and automatically determines an appropriate delay time. You can specify a commit delay value between 0 and 500 ms.
         */
        maxCommitDelay?: string | null;
        /**
         * The mutations to be executed when this transaction commits. All mutations are applied atomically, in the order they appear in this list.
         */
        mutations?: Schema$Mutation[];
        /**
         * Optional. If the read-write transaction was executed on a multiplexed session, then you must include the precommit token with the highest sequence number received in this transaction attempt. Failing to do so results in a `FailedPrecondition` error.
         */
        precommitToken?: Schema$MultiplexedSessionPrecommitToken;
        /**
         * Common options for this request.
         */
        requestOptions?: Schema$RequestOptions;
        /**
         * If `true`, then statistics related to the transaction is included in the CommitResponse. Default value is `false`.
         */
        returnCommitStats?: boolean | null;
        /**
         * Execute mutations in a temporary transaction. Note that unlike commit of a previously-started transaction, commit with a temporary transaction is non-idempotent. That is, if the `CommitRequest` is sent to Cloud Spanner more than once (for instance, due to retries in the application, or in the transport library), it's possible that the mutations are executed more than once. If this is undesirable, use BeginTransaction and Commit instead.
         */
        singleUseTransaction?: Schema$TransactionOptions;
        /**
         * Commit a previously-started transaction.
         */
        transactionId?: string | null;
    }
    /**
     * The response for Commit.
     */
    export interface Schema$CommitResponse {
        /**
         * The statistics about this `Commit`. Not returned by default. For more information, see CommitRequest.return_commit_stats.
         */
        commitStats?: Schema$CommitStats;
        /**
         * The Cloud Spanner timestamp at which the transaction committed.
         */
        commitTimestamp?: string | null;
        /**
         * If specified, transaction has not committed yet. You must retry the commit with the new precommit token.
         */
        precommitToken?: Schema$MultiplexedSessionPrecommitToken;
        /**
         * If `TransactionOptions.isolation_level` is set to `IsolationLevel.REPEATABLE_READ`, then the snapshot timestamp is the timestamp at which all reads in the transaction ran. This timestamp is never returned.
         */
        snapshotTimestamp?: string | null;
    }
    /**
     * Additional statistics about a commit.
     */
    export interface Schema$CommitStats {
        /**
         * The total number of mutations for the transaction. Knowing the `mutation_count` value can help you maximize the number of mutations in a transaction and minimize the number of API round trips. You can also monitor this value to prevent transactions from exceeding the system [limit](https://cloud.google.com/spanner/quotas#limits_for_creating_reading_updating_and_deleting_data). If the number of mutations exceeds the limit, the server returns [INVALID_ARGUMENT](https://cloud.google.com/spanner/docs/reference/rest/v1/Code#ENUM_VALUES.INVALID_ARGUMENT).
         */
        mutationCount?: string | null;
    }
    /**
     * Metadata type for the long-running operation returned by `CALL compact_all()`, which can be executed using ExecuteSql or ExecuteStreamingSql APIs.
     */
    export interface Schema$CompactDatabaseMetadata {
        /**
         * Output only. The time at which cancellation of this operation was received. Operations.CancelOperation starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.
         */
        cancelTime?: string | null;
        /**
         * Output only. The database being compacted.
         */
        database?: string | null;
        /**
         * Output only. The progress of the compaction operation.
         */
        progress?: Schema$OperationProgress;
    }
    /**
     * A message representing context for a KeyRangeInfo, including a label, value, unit, and severity.
     */
    export interface Schema$ContextValue {
        /**
         * The label for the context value. e.g. "latency".
         */
        label?: Schema$LocalizedString;
        /**
         * The severity of this context.
         */
        severity?: string | null;
        /**
         * The unit of the context value.
         */
        unit?: string | null;
        /**
         * The value for the context.
         */
        value?: number | null;
    }
    /**
     * Encryption configuration for the copied backup.
     */
    export interface Schema$CopyBackupEncryptionConfig {
        /**
         * Required. The encryption type of the backup.
         */
        encryptionType?: string | null;
        /**
         * Optional. This field is maintained for backwards compatibility. For new callers, we recommend using `kms_key_names` to specify the KMS key. Only use `kms_key_name` if the location of the KMS key matches the database instance's configuration (location) exactly. For example, if the KMS location is in `us-central1` or `nam3`, then the database instance must also be in `us-central1` or `nam3`. The Cloud KMS key that is used to encrypt and decrypt the restored database. Set this field only when encryption_type is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
         */
        kmsKeyName?: string | null;
        /**
         * Optional. Specifies the KMS configuration for the one or more keys used to protect the backup. Values are of the form `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`. KMS keys specified can be in any order. The keys referenced by `kms_key_names` must fully cover all regions of the backup's instance configuration. Some examples: * For regional (single-region) instance configurations, specify a regional location KMS key. * For multi-region instance configurations of type `GOOGLE_MANAGED`, either specify a multi-region location KMS key or multiple regional location KMS keys that cover all regions in the instance configuration. * For an instance configuration of type `USER_MANAGED`, specify only regional location KMS keys to cover each region in the instance configuration. Multi-region location KMS keys aren't supported for `USER_MANAGED` type instance configurations.
         */
        kmsKeyNames?: string[] | null;
    }
    /**
     * Metadata type for the operation returned by CopyBackup.
     */
    export interface Schema$CopyBackupMetadata {
        /**
         * The time at which cancellation of CopyBackup operation was received. Operations.CancelOperation starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.
         */
        cancelTime?: string | null;
        /**
         * The name of the backup being created through the copy operation. Values are of the form `projects/<project>/instances/<instance>/backups/<backup>`.
         */
        name?: string | null;
        /**
         * The progress of the CopyBackup operation.
         */
        progress?: Schema$OperationProgress;
        /**
         * The name of the source backup that is being copied. Values are of the form `projects/<project>/instances/<instance>/backups/<backup>`.
         */
        sourceBackup?: string | null;
    }
    /**
     * The request for CopyBackup.
     */
    export interface Schema$CopyBackupRequest {
        /**
         * Required. The id of the backup copy. The `backup_id` appended to `parent` forms the full backup_uri of the form `projects/<project>/instances/<instance>/backups/<backup>`.
         */
        backupId?: string | null;
        /**
         * Optional. The encryption configuration used to encrypt the backup. If this field is not specified, the backup will use the same encryption configuration as the source backup by default, namely encryption_type = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
         */
        encryptionConfig?: Schema$CopyBackupEncryptionConfig;
        /**
         * Required. The expiration time of the backup in microsecond granularity. The expiration time must be at least 6 hours and at most 366 days from the `create_time` of the source backup. Once the `expire_time` has passed, the backup is eligible to be automatically deleted by Cloud Spanner to free the resources used by the backup.
         */
        expireTime?: string | null;
        /**
         * Required. The source backup to be copied. The source backup needs to be in READY state for it to be copied. Once CopyBackup is in progress, the source backup cannot be deleted or cleaned up on expiration until CopyBackup is finished. Values are of the form: `projects/<project>/instances/<instance>/backups/<backup>`.
         */
        sourceBackup?: string | null;
    }
    /**
     * Encryption configuration for the backup to create.
     */
    export interface Schema$CreateBackupEncryptionConfig {
        /**
         * Required. The encryption type of the backup.
         */
        encryptionType?: string | null;
        /**
         * Optional. This field is maintained for backwards compatibility. For new callers, we recommend using `kms_key_names` to specify the KMS key. Only use `kms_key_name` if the location of the KMS key matches the database instance's configuration (location) exactly. For example, if the KMS location is in `us-central1` or `nam3`, then the database instance must also be in `us-central1` or `nam3`. The Cloud KMS key that is used to encrypt and decrypt the restored database. Set this field only when encryption_type is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
         */
        kmsKeyName?: string | null;
        /**
         * Optional. Specifies the KMS configuration for the one or more keys used to protect the backup. Values are of the form `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`. The keys referenced by `kms_key_names` must fully cover all regions of the backup's instance configuration. Some examples: * For regional (single-region) instance configurations, specify a regional location KMS key. * For multi-region instance configurations of type `GOOGLE_MANAGED`, either specify a multi-region location KMS key or multiple regional location KMS keys that cover all regions in the instance configuration. * For an instance configuration of type `USER_MANAGED`, specify only regional location KMS keys to cover each region in the instance configuration. Multi-region location KMS keys aren't supported for `USER_MANAGED` type instance configurations.
         */
        kmsKeyNames?: string[] | null;
    }
    /**
     * Metadata type for the operation returned by CreateBackup.
     */
    export interface Schema$CreateBackupMetadata {
        /**
         * The time at which cancellation of this operation was received. Operations.CancelOperation starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.
         */
        cancelTime?: string | null;
        /**
         * The name of the database the backup is created from.
         */
        database?: string | null;
        /**
         * The name of the backup being created.
         */
        name?: string | null;
        /**
         * The progress of the CreateBackup operation.
         */
        progress?: Schema$OperationProgress;
    }
    /**
     * Metadata type for the operation returned by CreateDatabase.
     */
    export interface Schema$CreateDatabaseMetadata {
        /**
         * The database being created.
         */
        database?: string | null;
    }
    /**
     * The request for CreateDatabase.
     */
    export interface Schema$CreateDatabaseRequest {
        /**
         * Required. A `CREATE DATABASE` statement, which specifies the ID of the new database. The database ID must conform to the regular expression `[a-z][a-z0-9_\-]*[a-z0-9]` and be between 2 and 30 characters in length. If the database ID is a reserved word or if it contains a hyphen, the database ID must be enclosed in backticks (`` ` ``).
         */
        createStatement?: string | null;
        /**
         * Optional. The dialect of the Cloud