// googleapis — Google APIs Client Library for Node.js
// TypeScript type declarations (BigQuery v2), auto-generated.
/**
* Copyright 2015 Google Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { AxiosPromise } from 'axios';
import { GoogleApis } from '../..';
import { BodyResponseCallback, GlobalOptions, MethodOptions } from '../../lib/api';
/**
* BigQuery API
*
* A data platform for customers to create, manage, share and query data.
*
* @example
* const google = require('googleapis');
* const bigquery = google.bigquery('v2');
*
* @namespace bigquery
* @type {Function}
* @version v2
* @variation v2
* @param {object=} options Options for Bigquery
*/
export declare class Bigquery {
    // Global request options (auth, params, …) applied to every call made
    // through this API instance.
    _options: GlobalOptions;
    // Back-reference to the shared GoogleApis instance that created this API.
    google: GoogleApis;
    // Self-reference; generated resources use it to reach the API root.
    root: this;
    // Resource groups exposing the BigQuery v2 REST methods.
    datasets: Resource$Datasets;
    jobs: Resource$Jobs;
    projects: Resource$Projects;
    tabledata: Resource$Tabledata;
    tables: Resource$Tables;
    constructor(options: GlobalOptions, google: GoogleApis);
    // Returns this API instance (the root object for the generated resources).
    getRoot(): this;
}
/**
 * Describes how a single Cloud Bigtable column is exposed in the BigQuery
 * table schema. All fields are optional on the wire: the qualifier may be
 * supplied via either qualifierString or qualifierEncoded, and every other
 * property has a documented default or falls back to the column-family-level
 * setting.
 */
export interface Schema$BigtableColumn {
    /**
     * [Optional] The encoding of the values when the type is not STRING.
     * Acceptable encoding values are: TEXT - indicates values are alphanumeric
     * text strings. BINARY - indicates values are encoded using HBase
     * Bytes.toBytes family of functions. 'encoding' can also be set at
     * the column family level. However, the setting at this level takes
     * precedence if 'encoding' is set at both levels.
     */
    encoding?: string;
    /**
     * [Optional] If the qualifier is not a valid BigQuery field identifier i.e.
     * does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided
     * as the column field name and is used as field name in queries.
     */
    fieldName?: string;
    /**
     * [Optional] If this is set, only the latest version of value in this column
     * are exposed. 'onlyReadLatest' can also be set at the column family
     * level. However, the setting at this level takes precedence if
     * 'onlyReadLatest' is set at both levels.
     */
    onlyReadLatest?: boolean;
    /**
     * [Required] Qualifier of the column. Columns in the parent column family
     * that has this exact qualifier are exposed as . field. If the qualifier is
     * valid UTF-8 string, it can be specified in the qualifier_string field.
     * Otherwise, a base-64 encoded value must be set to qualifier_encoded. The
     * column field name is the same as the column qualifier. However, if the
     * qualifier is not a valid BigQuery field identifier i.e. does not match
     * [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as field_name.
     * (Exactly one of qualifierEncoded / qualifierString is supplied, so each
     * field is individually optional.)
     */
    qualifierEncoded?: string;
    /**
     * The column qualifier as a UTF-8 string; alternative to qualifierEncoded.
     */
    qualifierString?: string;
    /**
     * [Optional] The type to convert the value in cells of this column. The
     * values are expected to be encoded using HBase Bytes.toBytes function when
     * using the BINARY encoding value. Following BigQuery types are allowed
     * (case-sensitive) - BYTES STRING INTEGER FLOAT BOOLEAN Default type is
     * BYTES. 'type' can also be set at the column family level. However,
     * the setting at this level takes precedence if 'type' is set at both
     * levels.
     */
    type?: string;
}
/**
 * Describes how a Cloud Bigtable column family is exposed in the BigQuery
 * table schema. Fields are optional on the wire; column-level settings in
 * 'columns' override the family-level defaults declared here.
 */
export interface Schema$BigtableColumnFamily {
    /**
     * [Optional] Lists of columns that should be exposed as individual fields as
     * opposed to a list of (column name, value) pairs. All columns whose
     * qualifier matches a qualifier in this list can be accessed as .. Other
     * columns can be accessed as a list through .Column field.
     */
    columns?: Schema$BigtableColumn[];
    /**
     * [Optional] The encoding of the values when the type is not STRING.
     * Acceptable encoding values are: TEXT - indicates values are alphanumeric
     * text strings. BINARY - indicates values are encoded using HBase
     * Bytes.toBytes family of functions. This can be overridden for a specific
     * column by listing that column in 'columns' and specifying an
     * encoding for it.
     */
    encoding?: string;
    /**
     * Identifier of the column family.
     */
    familyId?: string;
    /**
     * [Optional] If this is set only the latest version of value are exposed for
     * all columns in this column family. This can be overridden for a specific
     * column by listing that column in 'columns' and specifying a
     * different setting for that column.
     */
    onlyReadLatest?: boolean;
    /**
     * [Optional] The type to convert the value in cells of this column family.
     * The values are expected to be encoded using HBase Bytes.toBytes function
     * when using the BINARY encoding value. Following BigQuery types are allowed
     * (case-sensitive) - BYTES STRING INTEGER FLOAT BOOLEAN Default type is
     * BYTES. This can be overridden for a specific column by listing that column
     * in 'columns' and specifying a type for it.
     */
    type?: string;
}
/**
 * Options for reading a table backed by Cloud Bigtable as an external data
 * source. Every field is [Optional] and has a documented default.
 */
export interface Schema$BigtableOptions {
    /**
     * [Optional] List of column families to expose in the table schema along with
     * their types. This list restricts the column families that can be referenced
     * in queries and specifies their value types. You can use this list to do
     * type conversions - see the 'type' field for more details. If you
     * leave this list empty, all column families are present in the table schema
     * and their values are read as BYTES. During a query only the column families
     * referenced in that query are read from Bigtable.
     */
    columnFamilies?: Schema$BigtableColumnFamily[];
    /**
     * [Optional] If field is true, then the column families that are not
     * specified in columnFamilies list are not exposed in the table schema.
     * Otherwise, they are read with BYTES type values. The default value is
     * false.
     */
    ignoreUnspecifiedColumnFamilies?: boolean;
    /**
     * [Optional] If field is true, then the rowkey column families will be read
     * and converted to string. Otherwise they are read with BYTES type values and
     * users need to manually cast them with CAST if necessary. The default value
     * is false.
     */
    readRowkeyAsString?: boolean;
}
/**
 * Parsing options for CSV external data sources. Every field is [Optional]
 * and has a documented default.
 */
export interface Schema$CsvOptions {
    /**
     * [Optional] Indicates if BigQuery should accept rows that are missing
     * trailing optional columns. If true, BigQuery treats missing trailing
     * columns as null values. If false, records with missing trailing columns are
     * treated as bad records, and if there are too many bad records, an invalid
     * error is returned in the job result. The default value is false.
     */
    allowJaggedRows?: boolean;
    /**
     * [Optional] Indicates if BigQuery should allow quoted data sections that
     * contain newline characters in a CSV file. The default value is false.
     */
    allowQuotedNewlines?: boolean;
    /**
     * [Optional] The character encoding of the data. The supported values are
     * UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data
     * after the raw, binary data has been split using the values of the quote and
     * fieldDelimiter properties.
     */
    encoding?: string;
    /**
     * [Optional] The separator for fields in a CSV file. BigQuery converts the
     * string to ISO-8859-1 encoding, and then uses the first byte of the encoded
     * string to split the data in its raw, binary state. BigQuery also supports
     * the escape sequence "\t" to specify a tab separator. The default
     * value is a comma (',').
     */
    fieldDelimiter?: string;
    /**
     * [Optional] The value that is used to quote data sections in a CSV file.
     * BigQuery converts the string to ISO-8859-1 encoding, and then uses the
     * first byte of the encoded string to split the data in its raw, binary
     * state. The default value is a double-quote ('"'). If your data
     * does not contain quoted sections, set the property value to an empty
     * string. If your data contains quoted newline characters, you must also set
     * the allowQuotedNewlines property to true.
     */
    quote?: string;
    /**
     * [Optional] The number of rows at the top of a CSV file that BigQuery will
     * skip when reading the data. The default value is 0. This property is useful
     * if you have header rows in the file that should be skipped.
     */
    skipLeadingRows?: string;
}
/**
 * A BigQuery dataset resource, used both in requests (insert/update) and
 * responses (get/list). Only datasetReference is [Required]; [Output-only]
 * fields are server-populated and absent from request bodies, so all other
 * fields are optional.
 */
export interface Schema$Dataset {
    /**
     * [Optional] An array of objects that define dataset access for one or more
     * entities. You can set this property when inserting or updating a dataset in
     * order to control who is allowed to access the data. If unspecified at
     * dataset creation time, BigQuery adds default dataset access for the
     * following entities: access.specialGroup: projectReaders; access.role:
     * READER; access.specialGroup: projectWriters; access.role: WRITER;
     * access.specialGroup: projectOwners; access.role: OWNER; access.userByEmail:
     * [dataset creator email]; access.role: OWNER;
     */
    access?: any[];
    /**
     * [Output-only] The time when this dataset was created, in milliseconds since
     * the epoch.
     */
    creationTime?: string;
    /**
     * [Required] A reference that identifies the dataset.
     */
    datasetReference: Schema$DatasetReference;
    /**
     * [Optional] The default lifetime of all tables in the dataset, in
     * milliseconds. The minimum value is 3600000 milliseconds (one hour). Once
     * this property is set, all newly-created tables in the dataset will have an
     * expirationTime property set to the creation time plus the value in this
     * property, and changing the value will only affect new tables, not existing
     * ones. When the expirationTime for a given table is reached, that table will
     * be deleted automatically. If a table's expirationTime is modified or
     * removed before the table expires, or if you provide an explicit
     * expirationTime when creating a table, that value takes precedence over the
     * default expiration time indicated by this property.
     */
    defaultTableExpirationMs?: string;
    /**
     * [Optional] A user-friendly description of the dataset.
     */
    description?: string;
    /**
     * [Output-only] A hash of the resource.
     */
    etag?: string;
    /**
     * [Optional] A descriptive name for the dataset.
     */
    friendlyName?: string;
    /**
     * [Output-only] The fully-qualified unique name of the dataset in the format
     * projectId:datasetId. The dataset name without the project name is given in
     * the datasetId field. When creating a new dataset, leave this field blank,
     * and instead specify the datasetId field.
     */
    id?: string;
    /**
     * [Output-only] The resource type.
     */
    kind?: string;
    /**
     * The labels associated with this dataset. You can use these to organize and
     * group your datasets. You can set this property when inserting or updating a
     * dataset. See Labeling Datasets for more information.
     */
    labels?: any;
    /**
     * [Output-only] The date when this dataset or any of its tables was last
     * modified, in milliseconds since the epoch.
     */
    lastModifiedTime?: string;
    /**
     * The geographic location where the dataset should reside. The default value
     * is US.
     */
    location?: string;
    /**
     * [Output-only] A URL that can be used to access the resource again. You can
     * use this URL in Get or Update requests to the resource.
     */
    selfLink?: string;
}
/**
 * Response for Datasets: list. All fields are server-populated and may be
 * omitted (e.g. 'datasets' when the project has none, 'nextPageToken' on the
 * final page), so every field is optional.
 */
export interface Schema$DatasetList {
    /**
     * An array of the dataset resources in the project. Each resource contains
     * basic information. For full information about a particular dataset
     * resource, use the Datasets: get method. This property is omitted when there
     * are no datasets in the project.
     */
    datasets?: any[];
    /**
     * A hash value of the results page. You can use this property to determine if
     * the page has changed since the last request.
     */
    etag?: string;
    /**
     * The list type. This property always returns the value
     * "bigquery#datasetList".
     */
    kind?: string;
    /**
     * A token that can be used to request the next results page. This property is
     * omitted on the final results page.
     */
    nextPageToken?: string;
}
/**
 * Identifies a dataset. Only datasetId is [Required]; the projectId is
 * [Optional] and defaults to the request's project.
 */
export interface Schema$DatasetReference {
    /**
     * [Required] A unique ID for this dataset, without the project name. The ID
     * must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_).
     * The maximum length is 1,024 characters.
     */
    datasetId: string;
    /**
     * [Optional] The ID of the project containing this dataset.
     */
    projectId?: string;
}
/**
 * [Experimental] Properties to apply to a destination table if the load job
 * creates it. Both fields are [Optional].
 */
export interface Schema$DestinationTableProperties {
    /**
     * [Optional] The description for the destination table. This will only be
     * used if the destination table is newly created. If the table already exists
     * and a value different than the current description is provided, the job
     * will fail.
     */
    description?: string;
    /**
     * [Optional] The friendly name for the destination table. This will only be
     * used if the destination table is newly created. If the table already exists
     * and a value different than the current friendly name is provided, the job
     * will fail.
     */
    friendlyName?: string;
}
/**
 * Custom encryption settings (customer-managed Cloud KMS key) for a
 * destination table.
 */
export interface Schema$EncryptionConfiguration {
    /**
     * [Optional] Describes the Cloud KMS encryption key that will be used to
     * protect destination BigQuery table. The BigQuery Service Account associated
     * with your project requires access to this encryption key.
     */
    kmsKeyName?: string;
}
/**
 * A single error or warning attached to a job or query response. All fields
 * are server-populated and may be absent (e.g. 'location' only "if present"),
 * so every field is optional.
 */
export interface Schema$ErrorProto {
    /**
     * Debugging information. This property is internal to Google and should not
     * be used.
     */
    debugInfo?: string;
    /**
     * Specifies where the error occurred, if present.
     */
    location?: string;
    /**
     * A human-readable description of the error.
     */
    message?: string;
    /**
     * A short error code that summarizes the error.
     */
    reason?: string;
}
/**
 * A single stage of a query execution plan, with timing and throughput
 * diagnostics. These statistics are produced by the server only; none of the
 * fields is marked [Required], so all are optional.
 */
export interface Schema$ExplainQueryStage {
    /**
     * Number of parallel input segments completed.
     */
    completedParallelInputs?: string;
    /**
     * Milliseconds the average shard spent on CPU-bound tasks.
     */
    computeMsAvg?: string;
    /**
     * Milliseconds the slowest shard spent on CPU-bound tasks.
     */
    computeMsMax?: string;
    /**
     * Relative amount of time the average shard spent on CPU-bound tasks.
     */
    computeRatioAvg?: number;
    /**
     * Relative amount of time the slowest shard spent on CPU-bound tasks.
     */
    computeRatioMax?: number;
    /**
     * Stage end time in milliseconds.
     */
    endMs?: string;
    /**
     * Unique ID for stage within plan.
     */
    id?: string;
    /**
     * IDs for stages that are inputs to this stage.
     */
    inputStages?: string[];
    /**
     * Human-readable name for stage.
     */
    name?: string;
    /**
     * Number of parallel input segments to be processed.
     */
    parallelInputs?: string;
    /**
     * Milliseconds the average shard spent reading input.
     */
    readMsAvg?: string;
    /**
     * Milliseconds the slowest shard spent reading input.
     */
    readMsMax?: string;
    /**
     * Relative amount of time the average shard spent reading input.
     */
    readRatioAvg?: number;
    /**
     * Relative amount of time the slowest shard spent reading input.
     */
    readRatioMax?: number;
    /**
     * Number of records read into the stage.
     */
    recordsRead?: string;
    /**
     * Number of records written by the stage.
     */
    recordsWritten?: string;
    /**
     * Total number of bytes written to shuffle.
     */
    shuffleOutputBytes?: string;
    /**
     * Total number of bytes written to shuffle and spilled to disk.
     */
    shuffleOutputBytesSpilled?: string;
    /**
     * Stage start time in milliseconds.
     */
    startMs?: string;
    /**
     * Current status for the stage.
     */
    status?: string;
    /**
     * List of operations within the stage in dependency order (approximately
     * chronological).
     */
    steps?: Schema$ExplainQueryStep[];
    /**
     * Milliseconds the average shard spent waiting to be scheduled.
     */
    waitMsAvg?: string;
    /**
     * Milliseconds the slowest shard spent waiting to be scheduled.
     */
    waitMsMax?: string;
    /**
     * Relative amount of time the average shard spent waiting to be scheduled.
     */
    waitRatioAvg?: number;
    /**
     * Relative amount of time the slowest shard spent waiting to be scheduled.
     */
    waitRatioMax?: number;
    /**
     * Milliseconds the average shard spent on writing output.
     */
    writeMsAvg?: string;
    /**
     * Milliseconds the slowest shard spent on writing output.
     */
    writeMsMax?: string;
    /**
     * Relative amount of time the average shard spent on writing output.
     */
    writeRatioAvg?: number;
    /**
     * Relative amount of time the slowest shard spent on writing output.
     */
    writeRatioMax?: number;
}
/**
 * One operation within a query-plan stage. Server-populated diagnostics;
 * fields are optional.
 */
export interface Schema$ExplainQueryStep {
    /**
     * Machine-readable operation type.
     */
    kind?: string;
    /**
     * Human-readable stage descriptions.
     */
    substeps?: string[];
}
/**
 * Configuration for querying an external data source (Cloud Storage, Google
 * Sheets, Cloud Bigtable, Cloud Datastore backups). Only sourceFormat and
 * sourceUris are [Required]; the remaining fields are optional with
 * documented defaults or apply only to specific source formats.
 */
export interface Schema$ExternalDataConfiguration {
    /**
     * Try to detect schema and format options automatically. Any option specified
     * explicitly will be honored.
     */
    autodetect?: boolean;
    /**
     * [Optional] Additional options if sourceFormat is set to BIGTABLE.
     */
    bigtableOptions?: Schema$BigtableOptions;
    /**
     * [Optional] The compression type of the data source. Possible values include
     * GZIP and NONE. The default value is NONE. This setting is ignored for
     * Google Cloud Bigtable, Google Cloud Datastore backups and Avro formats.
     */
    compression?: string;
    /**
     * Additional properties to set if sourceFormat is set to CSV.
     */
    csvOptions?: Schema$CsvOptions;
    /**
     * [Optional] Additional options if sourceFormat is set to GOOGLE_SHEETS.
     */
    googleSheetsOptions?: Schema$GoogleSheetsOptions;
    /**
     * [Optional] Indicates if BigQuery should allow extra values that are not
     * represented in the table schema. If true, the extra values are ignored. If
     * false, records with extra columns are treated as bad records, and if there
     * are too many bad records, an invalid error is returned in the job result.
     * The default value is false. The sourceFormat property determines what
     * BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values
     * that don't match any column names Google Cloud Bigtable: This setting
     * is ignored. Google Cloud Datastore backups: This setting is ignored. Avro:
     * This setting is ignored.
     */
    ignoreUnknownValues?: boolean;
    /**
     * [Optional] The maximum number of bad records that BigQuery can ignore when
     * reading data. If the number of bad records exceeds this value, an invalid
     * error is returned in the job result. The default value is 0, which requires
     * that all records are valid. This setting is ignored for Google Cloud
     * Bigtable, Google Cloud Datastore backups and Avro formats.
     */
    maxBadRecords?: number;
    /**
     * [Optional] The schema for the data. Schema is required for CSV and JSON
     * formats. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore
     * backups, and Avro formats.
     */
    schema?: Schema$TableSchema;
    /**
     * [Required] The data format. For CSV files, specify "CSV". For
     * Google sheets, specify "GOOGLE_SHEETS". For newline-delimited
     * JSON, specify "NEWLINE_DELIMITED_JSON". For Avro files, specify
     * "AVRO". For Google Cloud Datastore backups, specify
     * "DATASTORE_BACKUP". [Beta] For Google Cloud Bigtable, specify
     * "BIGTABLE".
     */
    sourceFormat: string;
    /**
     * [Required] The fully-qualified URIs that point to your data in Google
     * Cloud. For Google Cloud Storage URIs: Each URI can contain one '*'
     * wildcard character and it must come after the 'bucket' name. Size
     * limits related to load jobs apply to external data sources. For Google
     * Cloud Bigtable URIs: Exactly one URI can be specified and it has be a fully
     * specified and valid HTTPS URL for a Google Cloud Bigtable table. For Google
     * Cloud Datastore backups, exactly one URI can be specified. Also, the
     * '*' wildcard character is not allowed.
     */
    sourceUris: string[];
}
/**
 * Response for Jobs: getQueryResults. Several fields are explicitly
 * conditional ("Present only when the query completes successfully", DML-only
 * counts, paging token), and none is marked [Required], so all fields are
 * optional.
 */
export interface Schema$GetQueryResultsResponse {
    /**
     * Whether the query result was fetched from the query cache.
     */
    cacheHit?: boolean;
    /**
     * [Output-only] The first errors or warnings encountered during the running
     * of the job. The final message includes the number of errors that caused the
     * process to stop. Errors here do not necessarily mean that the job has
     * completed or was unsuccessful.
     */
    errors?: Schema$ErrorProto[];
    /**
     * A hash of this response.
     */
    etag?: string;
    /**
     * Whether the query has completed or not. If rows or totalRows are present,
     * this will always be true. If this is false, totalRows will not be
     * available.
     */
    jobComplete?: boolean;
    /**
     * Reference to the BigQuery Job that was created to run the query. This field
     * will be present even if the original request timed out, in which case
     * GetQueryResults can be used to read the results once the query has
     * completed. Since this API only returns the first page of results,
     * subsequent pages can be fetched via the same mechanism (GetQueryResults).
     */
    jobReference?: Schema$JobReference;
    /**
     * The resource type of the response.
     */
    kind?: string;
    /**
     * [Output-only] The number of rows affected by a DML statement. Present only
     * for DML statements INSERT, UPDATE or DELETE.
     */
    numDmlAffectedRows?: string;
    /**
     * A token used for paging results.
     */
    pageToken?: string;
    /**
     * An object with as many results as can be contained within the maximum
     * permitted reply size. To get any additional rows, you can call
     * GetQueryResults and specify the jobReference returned above. Present only
     * when the query completes successfully.
     */
    rows?: Schema$TableRow[];
    /**
     * The schema of the results. Present only when the query completes
     * successfully.
     */
    schema?: Schema$TableSchema;
    /**
     * The total number of bytes processed for this query.
     */
    totalBytesProcessed?: string;
    /**
     * The total number of rows in the complete query result set, which can be
     * more than the number of rows in this single page of results. Present only
     * when the query completes successfully.
     */
    totalRows?: string;
}
/**
 * Response for Projects: getServiceAccount. Server-populated; fields are
 * optional.
 */
export interface Schema$GetServiceAccountResponse {
    /**
     * The service account email address.
     */
    email?: string;
    /**
     * The resource type of the response.
     */
    kind?: string;
}
/**
 * Options for reading a Google Sheets external data source.
 */
export interface Schema$GoogleSheetsOptions {
    /**
     * [Optional] The number of rows at the top of a sheet that BigQuery will skip
     * when reading the data. The default value is 0. This property is useful if
     * you have header rows that should be skipped. When autodetect is on,
     * behavior is the following: * skipLeadingRows unspecified - Autodetect tries
     * to detect headers in the first row. If they are not detected, the row is
     * read as data. Otherwise data is read starting from the second row. *
     * skipLeadingRows is 0 - Instructs autodetect that there are no headers and
     * data should be read starting from the first row. * skipLeadingRows = N >
     * 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If
     * headers are not detected, row N is just skipped. Otherwise row N is used to
     * extract column names for the detected schema.
     */
    skipLeadingRows?: string;
}
/**
 * A BigQuery job resource, used in both requests (insert) and responses.
 * Only 'configuration' is [Required]; [Output-only] fields are server-set and
 * absent from request bodies, so all other fields are optional.
 */
export interface Schema$Job {
    /**
     * [Required] Describes the job configuration.
     */
    configuration: Schema$JobConfiguration;
    /**
     * [Output-only] A hash of this resource.
     */
    etag?: string;
    /**
     * [Output-only] Opaque ID field of the job
     */
    id?: string;
    /**
     * [Optional] Reference describing the unique-per-user name of the job.
     */
    jobReference?: Schema$JobReference;
    /**
     * [Output-only] The type of the resource.
     */
    kind?: string;
    /**
     * [Output-only] A URL that can be used to access this resource again.
     */
    selfLink?: string;
    /**
     * [Output-only] Information about the job, including starting time and ending
     * time of the job.
     */
    statistics?: Schema$JobStatistics;
    /**
     * [Output-only] The status of this job. Examine this value when polling an
     * asynchronous job to see if the job is complete.
     */
    status?: Schema$JobStatus;
    /**
     * [Output-only] Email address of the user who ran the job.
     */
    user_email?: string;
}
/**
 * Response for Jobs: cancel. Server-populated; fields are optional.
 */
export interface Schema$JobCancelResponse {
    /**
     * The final state of the job.
     */
    job?: Schema$Job;
    /**
     * The resource type of the response.
     */
    kind?: string;
}
/**
 * Top-level job configuration. Exactly one of the [Pick one] fields (copy,
 * extract, load, query) is set per job, so each is individually optional;
 * the remaining fields are [Optional] with documented semantics.
 */
export interface Schema$JobConfiguration {
    /**
     * [Pick one] Copies a table.
     */
    copy?: Schema$JobConfigurationTableCopy;
    /**
     * [Optional] If set, don't actually run this job. A valid query will
     * return a mostly empty response with some processing statistics, while an
     * invalid query will return the same error it would if it wasn't a dry
     * run. Behavior of non-query jobs is undefined.
     */
    dryRun?: boolean;
    /**
     * [Pick one] Configures an extract job.
     */
    extract?: Schema$JobConfigurationExtract;
    /**
     * [Optional] Job timeout in milliseconds. If this time limit is exceeded,
     * BigQuery may attempt to terminate the job.
     */
    jobTimeoutMs?: string;
    /**
     * The labels associated with this job. You can use these to organize and
     * group your jobs. Label keys and values can be no longer than 63 characters,
     * can only contain lowercase letters, numeric characters, underscores and
     * dashes. International characters are allowed. Label values are optional.
     * Label keys must start with a letter and each label in the list must have a
     * different key.
     */
    labels?: any;
    /**
     * [Pick one] Configures a load job.
     */
    load?: Schema$JobConfigurationLoad;
    /**
     * [Pick one] Configures a query job.
     */
    query?: Schema$JobConfigurationQuery;
}
/**
 * Configuration for an extract (export) job. Only sourceTable is [Required];
 * destinationUri/destinationUris are a [Pick one] pair (destinationUri is
 * deprecated) and the rest are [Optional] with documented defaults.
 */
export interface Schema$JobConfigurationExtract {
    /**
     * [Optional] The compression type to use for exported files. Possible values
     * include GZIP, DEFLATE, SNAPPY, and NONE. The default value is NONE. DEFLATE
     * and SNAPPY are only supported for Avro.
     */
    compression?: string;
    /**
     * [Optional] The exported file format. Possible values include CSV,
     * NEWLINE_DELIMITED_JSON and AVRO. The default value is CSV. Tables with
     * nested or repeated fields cannot be exported as CSV.
     */
    destinationFormat?: string;
    /**
     * [Pick one] DEPRECATED: Use destinationUris instead, passing only one URI as
     * necessary. The fully-qualified Google Cloud Storage URI where the extracted
     * table should be written.
     */
    destinationUri?: string;
    /**
     * [Pick one] A list of fully-qualified Google Cloud Storage URIs where the
     * extracted table should be written.
     */
    destinationUris?: string[];
    /**
     * [Optional] Delimiter to use between fields in the exported data. Default is
     * ','
     */
    fieldDelimiter?: string;
    /**
     * [Optional] Whether to print out a header row in the results. Default is
     * true.
     */
    printHeader?: boolean;
    /**
     * [Required] A reference to the table being exported.
     */
    sourceTable: Schema$TableReference;
}
/**
 * Configuration for a load job. Only destinationTable and sourceUris are
 * [Required]; every other field is optional with a documented default,
 * applies only to certain source formats, or is deprecated.
 */
export interface Schema$JobConfigurationLoad {
    /**
     * [Optional] Accept rows that are missing trailing optional columns. The
     * missing values are treated as nulls. If false, records with missing
     * trailing columns are treated as bad records, and if there are too many bad
     * records, an invalid error is returned in the job result. The default value
     * is false. Only applicable to CSV, ignored for other formats.
     */
    allowJaggedRows?: boolean;
    /**
     * Indicates if BigQuery should allow quoted data sections that contain
     * newline characters in a CSV file. The default value is false.
     */
    allowQuotedNewlines?: boolean;
    /**
     * Indicates if we should automatically infer the options and schema for CSV
     * and JSON sources.
     */
    autodetect?: boolean;
    /**
     * [Optional] Specifies whether the job is allowed to create new tables. The
     * following values are supported: CREATE_IF_NEEDED: If the table does not
     * exist, BigQuery creates the table. CREATE_NEVER: The table must already
     * exist. If it does not, a 'notFound' error is returned in the job
     * result. The default value is CREATE_IF_NEEDED. Creation, truncation and
     * append actions occur as one atomic update upon job completion.
     */
    createDisposition?: string;
    /**
     * Custom encryption configuration (e.g., Cloud KMS keys).
     */
    destinationEncryptionConfiguration?: Schema$EncryptionConfiguration;
    /**
     * [Required] The destination table to load the data into.
     */
    destinationTable: Schema$TableReference;
    /**
     * [Experimental] [Optional] Properties with which to create the destination
     * table if it is new.
     */
    destinationTableProperties?: Schema$DestinationTableProperties;
    /**
     * [Optional] The character encoding of the data. The supported values are
     * UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data
     * after the raw, binary data has been split using the values of the quote and
     * fieldDelimiter properties.
     */
    encoding?: string;
    /**
     * [Optional] The separator for fields in a CSV file. The separator can be any
     * ISO-8859-1 single-byte character. To use a character in the range 128-255,
     * you must encode the character as UTF8. BigQuery converts the string to
     * ISO-8859-1 encoding, and then uses the first byte of the encoded string to
     * split the data in its raw, binary state. BigQuery also supports the escape
     * sequence "\t" to specify a tab separator. The default value is a
     * comma (',').
     */
    fieldDelimiter?: string;
    /**
     * [Optional] Indicates if BigQuery should allow extra values that are not
     * represented in the table schema. If true, the extra values are ignored. If
     * false, records with extra columns are treated as bad records, and if there
     * are too many bad records, an invalid error is returned in the job result.
     * The default value is false. The sourceFormat property determines what
     * BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values
     * that don't match any column names
     */
    ignoreUnknownValues?: boolean;
    /**
     * [Optional] The maximum number of bad records that BigQuery can ignore when
     * running the job. If the number of bad records exceeds this value, an
     * invalid error is returned in the job result. The default value is 0, which
     * requires that all records are valid.
     */
    maxBadRecords?: number;
    /**
     * [Optional] Specifies a string that represents a null value in a CSV file.
     * For example, if you specify "x/", BigQuery interprets
     * "x/" as a null value when loading a CSV file. The default value
     * is the empty string. If you set this property to a custom value, BigQuery
     * throws an error if an empty string is present for all data types except for
     * STRING and BYTE. For STRING and BYTE columns, BigQuery interprets the empty
     * string as an empty value.
     */
    nullMarker?: string;
    /**
     * If sourceFormat is set to "DATASTORE_BACKUP", indicates which
     * entity properties to load into BigQuery from a Cloud Datastore backup.
     * Property names are case sensitive and must be top-level properties. If no
     * properties are specified, BigQuery loads all properties. If any named
     * property isn't found in the Cloud Datastore backup, an invalid error is
     * returned in the job result.
     */
    projectionFields?: string[];
    /**
     * [Optional] The value that is used to quote data sections in a CSV file.
     * BigQuery converts the string to ISO-8859-1 encoding, and then uses the
     * first byte of the encoded string to split the data in its raw, binary
     * state. The default value is a double-quote ('"'). If your data
     * does not contain quoted sections, set the property value to an empty
     * string. If your data contains quoted newline characters, you must also set
     * the allowQuotedNewlines property to true.
     */
    quote?: string;
    /**
     * [Optional] The schema for the destination table. The schema can be omitted
     * if the destination table already exists, or if you're loading data from
     * Google Cloud Datastore.
     */
    schema?: Schema$TableSchema;
    /**
     * [Deprecated] The inline schema. For CSV schemas, specify as
     * "Field1:Type1[,Field2:Type2]*". For example, "foo:STRING,
     * bar:INTEGER, baz:FLOAT".
     */
    schemaInline?: string;
    /**
     * [Deprecated] The format of the schemaInline property.
     */
    schemaInlineFormat?: string;
    /**
     * Allows the schema of the destination table to be updated as a side effect
     * of the load job if a schema is autodetected or supplied in the job
     * configuration. Schema update options are supported in two cases: when
     * writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE
     * and the destination table is a partition of a table, specified by partition
     * decorators. For normal tables, WRITE_TRUNCATE will always overwrite the
     * schema. One or more of the following values are specified:
     * ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema.
     * ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original
     * schema to nullable.
     */
    schemaUpdateOptions?: string[];
    /**
     * [Optional] The number of rows at the top of a CSV file that BigQuery will
     * skip when loading the data. The default value is 0. This property is useful
     * if you have header rows in the file that should be skipped.
     */
    skipLeadingRows?: number;
    /**
     * [Optional] The format of the data files. For CSV files, specify
     * "CSV". For datastore backups, specify
     * "DATASTORE_BACKUP". For newline-delimited JSON, specify
     * "NEWLINE_DELIMITED_JSON". For Avro, specify "AVRO". For
     * parquet, specify "PARQUET". For orc, specify "ORC". The
     * default value is CSV.
     */
    sourceFormat?: string;
    /**
     * [Required] The fully-qualified URIs that point to your data in Google
     * Cloud. For Google Cloud Storage URIs: Each URI can contain one '*'
     * wildcard character and it must come after the 'bucket' name. Size
     * limits related to load jobs apply to external data sources. For Google
     * Cloud Bigtable URIs: Exactly one URI can be specified and it has be a fully
     * specified and valid HTTPS URL for a Google Cloud Bigtable table. For Google
     * Cloud Datastore backups: Exactly one URI can be specified. Also, the
     * '*' wildcard character is not allowed.
     */
    sourceUris: string[];
    /**
     * If specified, configures time-based partitioning for the destination table.
     */
    timePartitioning?: Schema$TimePartitioning;
    /**
     * [Optional] Specifies the action that occurs if the destination table
     * already exists. The following values are supported: WRITE_TRUNCATE: If the
     * table already exists, BigQuery overwrites the table data. WRITE_APPEND: If
     * the table already exists, BigQuery appends the data to the table.
     * WRITE_EMPTY: If the table already exists and contains data, a
     * 'duplicate' error is returned in the job result. The default value
     * is WRITE_APPEND. Each action is atomic and only occurs if BigQuery is able
     * to complete the job successfully. Creation, truncation and append actions
     * occur as one atomic update upon job completion.
     */
    writeDisposition?: string;
}
/**
 * Configuration for a BigQuery query job — the `query` member of a job
 * configuration. Mirrors the `JobConfigurationQuery` resource of the
 * BigQuery v2 REST API.
 */
export interface Schema$JobConfigurationQuery {
    /**
     * [Optional] If true and query uses legacy SQL dialect, allows the query to
     * produce arbitrarily large result tables at a slight cost in performance.
     * Requires destinationTable to be set. For standard SQL queries, this flag is
     * ignored and large results are always allowed. However, you must still set
     * destinationTable when result size exceeds the allowed maximum response
     * size.
     */
    allowLargeResults: boolean;
    /**
     * [Optional] Specifies whether the job is allowed to create new tables. The
     * following values are supported: CREATE_IF_NEEDED: If the table does not
     * exist, BigQuery creates the table. CREATE_NEVER: The table must already
     * exist. If it does not, a 'notFound' error is returned in the job
     * result. The default value is CREATE_IF_NEEDED. Creation, truncation and
     * append actions occur as one atomic update upon job completion.
     */
    createDisposition: string;
    /**
     * [Optional] Specifies the default dataset to use for unqualified table names
     * in the query.
     */
    defaultDataset: Schema$DatasetReference;
    /**
     * Custom encryption configuration (e.g., Cloud KMS keys).
     */
    destinationEncryptionConfiguration: Schema$EncryptionConfiguration;
    /**
     * [Optional] Describes the table where the query results should be stored. If
     * not present, a new table will be created to store the results. This
     * property must be set for large results that exceed the maximum response
     * size.
     */
    destinationTable: Schema$TableReference;
    /**
     * [Optional] If true and query uses legacy SQL dialect, flattens all nested
     * and repeated fields in the query results. allowLargeResults must be true if
     * this is set to false. For standard SQL queries, this flag is ignored and
     * results are never flattened.
     */
    flattenResults: boolean;
    /**
     * [Optional] Limits the billing tier for this job. Queries that have resource
     * usage beyond this tier will fail (without incurring a charge). If
     * unspecified, this will be set to your project default.
     */
    maximumBillingTier: number;
    /**
     * [Optional] Limits the bytes billed for this job. Queries that will have
     * bytes billed beyond this limit will fail (without incurring a charge). If
     * unspecified, this will be set to your project default.
     */
    maximumBytesBilled: string;
    /**
     * Standard SQL only. Set to POSITIONAL to use positional (?) query parameters
     * or to NAMED to use named (@myparam) query parameters in this query.
     */
    parameterMode: string;
    /**
     * [Deprecated] This property is deprecated.
     * @deprecated
     */
    preserveNulls: boolean;
    /**
     * [Optional] Specifies a priority for the query. Possible values include
     * INTERACTIVE and BATCH. The default value is INTERACTIVE.
     */
    priority: string;
    /**
     * [Required] SQL query text to execute. The useLegacySql field can be used to
     * indicate whether the query uses legacy SQL or standard SQL.
     */
    query: string;
    /**
     * Query parameters for standard SQL queries.
     */
    queryParameters: Schema$QueryParameter[];
    /**
     * Allows the schema of the destination table to be updated as a side effect
     * of the query job. Schema update options are supported in two cases: when
     * writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE
     * and the destination table is a partition of a table, specified by partition
     * decorators. For normal tables, WRITE_TRUNCATE will always overwrite the
     * schema. One or more of the following values are specified:
     * ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema.
     * ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original
     * schema to nullable.
     */
    schemaUpdateOptions: string[];
    /**
     * [Optional] If querying an external data source outside of BigQuery,
     * describes the data format, location and other properties of the data
     * source. By defining these properties, the data source can then be queried
     * as if it were a standard BigQuery table.
     *
     * NOTE(review): emitted as `any` by the generator; presumably a map of
     * table name to external data configuration — confirm against the
     * BigQuery REST reference before relying on a narrower type.
     */
    tableDefinitions: any;
    /**
     * If specified, configures time-based partitioning for the destination table.
     */
    timePartitioning: Schema$TimePartitioning;
    /**
     * Specifies whether to use BigQuery's legacy SQL dialect for this query.
     * The default value is true. If set to false, the query will use
     * BigQuery's standard SQL:
     * https://cloud.google.com/bigquery/sql-reference/ When useLegacySql is set
     * to false, the value of flattenResults is ignored; query will be run as if
     * flattenResults is false.
     */
    useLegacySql: boolean;
    /**
     * [Optional] Whether to look for the result in the query cache. The query
     * cache is a best-effort cache that will be flushed whenever tables in the
     * query are modified. Moreover, the query cache is only available when a
     * query does not have a destination table specified. The default value is
     * true.
     */
    useQueryCache: boolean;
    /**
     * Describes user-defined function resources used in the query.
     */
    userDefinedFunctionResources: Schema$UserDefinedFunctionResource[];
    /**
     * [Optional] Specifies the action that occurs if the destination table
     * already exists. The following values are supported: WRITE_TRUNCATE: If the
     * table already exists, BigQuery overwrites the table data and uses the
     * schema from the query result. WRITE_APPEND: If the table already exists,
     * BigQuery appends the data to the table. WRITE_EMPTY: If the table already
     * exists and contains data, a 'duplicate' error is returned in the
     * job result. The default value is WRITE_EMPTY. Each action is atomic and
     * only occurs if BigQuery is able to complete the job successfully. Creation,
     * truncation and append actions occur as one atomic update upon job
     * completion.
     */
    writeDisposition: string;
}
/**
 * Configuration for a BigQuery table-copy job — the `copy` member of a job
 * configuration. Exactly one of `sourceTable` or `sourceTables` is provided
 * ("[Pick one]" below).
 */
export interface Schema$JobConfigurationTableCopy {
    /**
     * [Optional] Specifies whether the job is allowed to create new tables. The
     * following values are supported: CREATE_IF_NEEDED: If the table does not
     * exist, BigQuery creates the table. CREATE_NEVER: The table must already
     * exist. If it does not, a 'notFound' error is returned in the job
     * result. The default value is CREATE_IF_NEEDED. Creation, truncation and
     * append actions occur as one atomic update upon job completion.
     */
    createDisposition: string;
    /**
     * Custom encryption configuration (e.g., Cloud KMS keys).
     */
    destinationEncryptionConfiguration: Schema$EncryptionConfiguration;
    /**
     * [Required] The destination table.
     */
    destinationTable: Schema$TableReference;
    /**
     * [Pick one] Source table to copy.
     */
    sourceTable: Schema$TableReference;
    /**
     * [Pick one] Source tables to copy.
     */
    sourceTables: Schema$TableReference[];
    /**
     * [Optional] Specifies the action that occurs if the destination table
     * already exists. The following values are supported: WRITE_TRUNCATE: If the
     * table already exists, BigQuery overwrites the table data. WRITE_APPEND: If
     * the table already exists, BigQuery appends the data to the table.
     * WRITE_EMPTY: If the table already exists and contains data, a
     * 'duplicate' error is returned in the job result. The default value
     * is WRITE_EMPTY. Each action is atomic and only occurs if BigQuery is able
     * to complete the job successfully. Creation, truncation and append actions
     * occur as one atomic update upon job completion.
     */
    writeDisposition: string;
}
/**
 * One page of results returned by the jobs.list API method, with an opaque
 * `nextPageToken` for fetching the following page.
 */
export interface Schema$JobList {
    /**
     * A hash of this page of results.
     */
    etag: string;
    /**
     * List of jobs that were requested.
     *
     * NOTE(review): emitted as `any[]` by the generator; presumably an array
     * of abbreviated job resources — confirm against the jobs.list REST
     * reference before relying on a narrower type.
     */
    jobs: any[];
    /**
     * The resource type of the response.
     */
    kind: string;
    /**
     * A token to request the next page of results.
     */
    nextPageToken: string;
}
/**
 * Fully-qualified reference identifying a single BigQuery job: project,
 * job ID, and (experimentally) the job's geographic location.
 */
export interface Schema$JobReference {
    /**
     * [Required] The ID of the job. The ID must contain only letters (a-z, A-Z),
     * numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024
     * characters.
     */
    jobId: string;
    /**
     * [Experimental] The geographic location of the job. Required except for US
     * and EU.
     */
    location: string;
    /**
     * [Required] The ID of the project containing this job.
     */
    projectId: string;
}
/**
 * Server-populated statistics for a job. All fields are output-only; the
 * per-type sub-statistics (`extract`, `load`, `query`) are present only for
 * jobs of the corresponding kind. Timestamps are milliseconds since the
 * Unix epoch, encoded as strings.
 */
export interface Schema$JobStatistics {
    /**
     * [Experimental] [Output-only] Job progress (0.0 -> 1.0) for LOAD and
     * EXTRACT jobs.
     */
    completionRatio: number;
    /**
     * [Output-only] Creation time of this job, in milliseconds since the epoch.
     * This field will be present on all jobs.
     */
    creationTime: string;
    /**
     * [Output-only] End time of this job, in milliseconds since the epoch. This
     * field will be present whenever a job is in the DONE state.
     */
    endTime: string;
    /**
     * [Output-only] Statistics for an extract job.
     */
    extract: Schema$JobStatistics4;
    /**
     * [Output-only] Statistics for a load job.
     */
    load: Schema$JobStatistics3;
    /**
     * [Output-only] Statistics for a query job.
     */
    query: Schema$JobStatistics2;
    /**
     * [Output-only] Start time of this job, in milliseconds since the epoch. This
     * field will be present when the job transitions from the PENDING state to
     * either RUNNING or DONE.
     */
    startTime: string;
    /**
     * [Output-only] [Deprecated] Use the bytes processed in the query statistics
     * instead.
     * @deprecated
     */
    totalBytesProcessed: string;
}
export interface Schema$JobStatistics2 {
/**
* [Output-only] Billing tier for the job.
*/
billingTier: number;
/**
* [Output-only] Whether the query result was fetched from the query cache.
*/
cacheHit: boolean;
/**
* [Output-only, Experimental] The DDL operation performed, possibly dependent
* on the pre-existence of the DDL target. Possible values (new values might
* be added in the future): "CREATE": The query created the DDL
* target. "SKIP": No-op. Example cases: the query is CREATE TABLE
* IF NOT EXISTS while the table already exists, or the query is DROP TABLE IF
* EXISTS while the table does not exist. "REPLACE": The query
* replaced the DDL target. Example case: the query is CREATE OR REPLACE
* TABLE, and the table already exists. "DROP": The query deleted
* the DDL target.
*/
ddlOperationPerformed: string;
/**
* [Output-only, Experimental] The DDL target table. Present only for
* CREATE/DROP TABLE/VIEW queries.
*/
ddlTargetTable: Schema$TableReference;
/**
* [Output-only] The original estimate of bytes processed for the job.
*/
estimatedBytesProcessed: string;
/**
* [Output-only] The number of rows affected by a DML statement. Present only
* for DML statements INSERT, UPDATE or DELETE.
*/
numDmlAffectedRows: string;
/**
* [Output-only] Describes execution plan for the query.
*/
queryPlan: Schema$ExplainQueryStage[];
/**
* [Output-only] Referenced tables for the job. Queries that reference more
* than 50 tables will not have a complete list.
*/
referencedTables: Schema$TableReference[];
/**
* [Output-only] The schema of the results. Present only for successful dry
* run of non-legacy SQL queries.
*/
schema: Schema$TableSchema;
/**
* [Output-only, Experimental] The type of query statement, if valid. Possible
* values (new values might be added in the future): "SELECT":
* SELECT query. "INSERT": INSERT query; see
* https://cloud.google.com/bigquery/docs/reference/standard-sql/da