UNPKG

googleapis

Version:
814 lines 129 kB
/** * Copyright 2019 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import { OAuth2Client, JWT, Compute, UserRefreshClient } from 'google-auth-library'; import { GoogleConfigurable, MethodOptions, GlobalOptions, BodyResponseCallback, APIRequestContext } from 'googleapis-common'; import { GaxiosPromise } from 'gaxios'; export declare namespace ml_v1 { interface Options extends GlobalOptions { version: 'v1'; } interface StandardParameters { /** * V1 error format. */ '$.xgafv'?: string; /** * OAuth access token. */ access_token?: string; /** * Data format for response. */ alt?: string; /** * JSONP */ callback?: string; /** * Selector specifying which fields to include in a partial response. */ fields?: string; /** * API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */ key?: string; /** * OAuth 2.0 token for the current user. */ oauth_token?: string; /** * Returns response with indentations and line breaks. */ prettyPrint?: boolean; /** * Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */ quotaUser?: string; /** * Legacy upload protocol for media (e.g. "media", "multipart"). */ uploadType?: string; /** * Upload protocol for media (e.g. "raw", "multipart"). 
*/ upload_protocol?: string; } /** * Cloud Machine Learning Engine * * An API to enable creating and using machine learning models. * * @example * const {google} = require('googleapis'); * const ml = google.ml('v1'); * * @namespace ml * @type {Function} * @version v1 * @variation v1 * @param {object=} options Options for Ml */ class Ml { context: APIRequestContext; projects: Resource$Projects; constructor(options: GlobalOptions, google?: GoogleConfigurable); } /** * Message that represents an arbitrary HTTP body. It should only be used for payload formats that can&#39;t be represented as JSON, such as raw binary or an HTML page. This message can be used both in streaming and non-streaming API methods in the request as well as the response. It can be used as a top-level request field, which is convenient if one wants to extract parameters from either the URL or HTTP template into the request fields and also want access to the raw HTTP body. Example: message GetResourceRequest { // A unique request id. string request_id = 1; // The raw HTTP body is bound to this field. google.api.HttpBody http_body = 2; } service ResourceService { rpc GetResource(GetResourceRequest) returns (google.api.HttpBody); rpc UpdateResource(google.api.HttpBody) returns (google.protobuf.Empty); } Example with streaming methods: service CaldavService { rpc GetCalendar(stream google.api.HttpBody) returns (stream google.api.HttpBody); rpc UpdateCalendar(stream google.api.HttpBody) returns (stream google.api.HttpBody); } Use of this type only changes how the request and response bodies are handled, all other features will continue to work unchanged. */ interface Schema$GoogleApi__HttpBody { /** * The HTTP Content-Type header value specifying the content type of the body. */ contentType?: string; /** * The HTTP request/response body as raw binary. */ data?: string; /** * Application specific response metadata. Must be set in the first response for streaming APIs. 
*/ extensions?: Array<{ [key: string]: any; }>; } /** * An observed value of a metric. */ interface Schema$GoogleCloudMlV1_HyperparameterOutput_HyperparameterMetric { /** * The objective value at this training step. */ objectiveValue?: number; /** * The global training step for this metric. */ trainingStep?: string; } /** * Represents a hardware accelerator request config. */ interface Schema$GoogleCloudMlV1__AcceleratorConfig { /** * The number of accelerators to attach to each machine running the job. */ count?: string; /** * The type of accelerator to use. */ type?: string; } /** * Options for automatically scaling a model. */ interface Schema$GoogleCloudMlV1__AutoScaling { /** * Optional. The minimum number of nodes to allocate for this model. These nodes are always up, starting from the time the model is deployed. Therefore, the cost of operating this model will be at least `rate` * `min_nodes` * number of hours since last billing cycle, where `rate` is the cost per node-hour as documented in the [pricing guide](/ml-engine/docs/pricing), even if no predictions are performed. There is additional cost for each prediction performed. Unlike manual scaling, if the load gets too heavy for the nodes that are up, the service will automatically add nodes to handle the increased load as well as scale back as traffic drops, always maintaining at least `min_nodes`. You will be charged for the time in which additional nodes are used. If not specified, `min_nodes` defaults to 0, in which case, when traffic to a model stops (and after a cool-down period), nodes will be shut down and no charges will be incurred until traffic to the model resumes. 
You can set `min_nodes` when creating the model version, and you can also update `min_nodes` for an existing version: &lt;pre&gt; update_body.json: { &#39;autoScaling&#39;: { &#39;minNodes&#39;: 5 } } &lt;/pre&gt; HTTP request: &lt;pre&gt; PATCH https://ml.googleapis.com/v1/{name=projects/x/models/x/versions/*}?update_mask=autoScaling.minNodes -d @./update_body.json &lt;/pre&gt; */ minNodes?: number; } /** * Represents output related to a built-in algorithm Job. */ interface Schema$GoogleCloudMlV1__BuiltInAlgorithmOutput { /** * Framework on which the built-in algorithm was trained. */ framework?: string; /** * The Cloud Storage path to the `model/` directory where the training job saves the trained model. Only set for successful jobs that don&#39;t use hyperparameter tuning. */ modelPath?: string; /** * Python version on which the built-in algorithm was trained. */ pythonVersion?: string; /** * AI Platform runtime version on which the built-in algorithm was trained. */ runtimeVersion?: string; } /** * Request message for the CancelJob method. */ interface Schema$GoogleCloudMlV1__CancelJobRequest { } interface Schema$GoogleCloudMlV1__Capability { /** * Available accelerators for the capability. */ availableAccelerators?: string[]; type?: string; } interface Schema$GoogleCloudMlV1__Config { /** * The service account Cloud ML uses to run on TPU node. */ tpuServiceAccount?: string; } /** * Returns service account information associated with a project. */ interface Schema$GoogleCloudMlV1__GetConfigResponse { config?: Schema$GoogleCloudMlV1__Config; /** * The service account Cloud ML uses to access resources in the project. */ serviceAccount?: string; /** * The project number for `service_account`. */ serviceAccountProject?: string; } /** * Represents the result of a single hyperparameter tuning trial from a training job. 
The TrainingOutput object that is returned on successful completion of a training job with hyperparameter tuning includes a list of HyperparameterOutput objects, one for each successful trial. */ interface Schema$GoogleCloudMlV1__HyperparameterOutput { /** * All recorded object metrics for this trial. This field is not currently populated. */ allMetrics?: Schema$GoogleCloudMlV1_HyperparameterOutput_HyperparameterMetric[]; /** * Details related to built-in algorithms jobs. Only set for trials of built-in algorithms jobs that have succeeded. */ builtInAlgorithmOutput?: Schema$GoogleCloudMlV1__BuiltInAlgorithmOutput; /** * Output only. End time for the trial. */ endTime?: string; /** * The final objective metric seen for this trial. */ finalMetric?: Schema$GoogleCloudMlV1_HyperparameterOutput_HyperparameterMetric; /** * The hyperparameters given to this trial. */ hyperparameters?: { [key: string]: string; }; /** * True if the trial is stopped early. */ isTrialStoppedEarly?: boolean; /** * Output only. Start time for the trial. */ startTime?: string; /** * Output only. The detailed state of the trial. */ state?: string; /** * The trial id for these results. */ trialId?: string; } /** * Represents a set of hyperparameters to optimize. */ interface Schema$GoogleCloudMlV1__HyperparameterSpec { /** * Optional. The search algorithm specified for the hyperparameter tuning job. Uses the default AI Platform hyperparameter tuning algorithm if unspecified. */ algorithm?: string; /** * Optional. Indicates if the hyperparameter tuning job enables auto trial early stopping. */ enableTrialEarlyStopping?: boolean; /** * Required. The type of goal to use for tuning. Available types are `MAXIMIZE` and `MINIMIZE`. Defaults to `MAXIMIZE`. */ goal?: string; /** * Optional. The TensorFlow summary tag name to use for optimizing trials. For current versions of TensorFlow, this tag name should exactly match what is shown in TensorBoard, including all scopes. 
For versions of TensorFlow prior to 0.12, this should be only the tag passed to tf.Summary. By default, &quot;training/hptuning/metric&quot; will be used. */ hyperparameterMetricTag?: string; /** * Optional. The number of failed trials that need to be seen before failing the hyperparameter tuning job. You can specify this field to override the default failing criteria for AI Platform hyperparameter tuning jobs. Defaults to zero, which means the service decides when a hyperparameter job should fail. */ maxFailedTrials?: number; /** * Optional. The number of training trials to run concurrently. You can reduce the time it takes to perform hyperparameter tuning by adding trials in parallel. However, each trail only benefits from the information gained in completed trials. That means that a trial does not get access to the results of trials running at the same time, which could reduce the quality of the overall optimization. Each trial will use the same scale tier and machine types. Defaults to one. */ maxParallelTrials?: number; /** * Optional. How many training trials should be attempted to optimize the specified hyperparameters. Defaults to one. */ maxTrials?: number; /** * Required. The set of parameters to tune. */ params?: Schema$GoogleCloudMlV1__ParameterSpec[]; /** * Optional. The prior hyperparameter tuning job id that users hope to continue with. The job id will be used to find the corresponding vizier study guid and resume the study. */ resumePreviousJobId?: string; } /** * Represents a training or prediction job. */ interface Schema$GoogleCloudMlV1__Job { /** * Output only. When the job was created. */ createTime?: string; /** * Output only. When the job processing was completed. */ endTime?: string; /** * Output only. The details of a failure or a cancellation. */ errorMessage?: string; /** * `etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a job from overwriting each other. 
It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform job updates in order to avoid race conditions: An `etag` is returned in the response to `GetJob`, and systems are expected to put that etag in the request to `UpdateJob` to ensure that their change will be applied to the same version of the job. */ etag?: string; /** * Required. The user-specified id of the job. */ jobId?: string; /** * Optional. One or more labels that you can add, to organize your jobs. Each label is a key-value pair, where both the key and the value are arbitrary strings that you supply. For more information, see the documentation on &lt;a href=&quot;/ml-engine/docs/tensorflow/resource-labels&quot;&gt;using labels&lt;/a&gt;. */ labels?: { [key: string]: string; }; /** * Input parameters to create a prediction job. */ predictionInput?: Schema$GoogleCloudMlV1__PredictionInput; /** * The current prediction job result. */ predictionOutput?: Schema$GoogleCloudMlV1__PredictionOutput; /** * Output only. When the job processing was started. */ startTime?: string; /** * Output only. The detailed state of a job. */ state?: string; /** * Input parameters to create a training job. */ trainingInput?: Schema$GoogleCloudMlV1__TrainingInput; /** * The current training job result. */ trainingOutput?: Schema$GoogleCloudMlV1__TrainingOutput; } /** * Response message for the ListJobs method. */ interface Schema$GoogleCloudMlV1__ListJobsResponse { /** * The list of jobs. */ jobs?: Schema$GoogleCloudMlV1__Job[]; /** * Optional. Pass this token as the `page_token` field of the request for a subsequent call. */ nextPageToken?: string; } interface Schema$GoogleCloudMlV1__ListLocationsResponse { /** * Locations where at least one type of CMLE capability is available. */ locations?: Schema$GoogleCloudMlV1__Location[]; /** * Optional. Pass this token as the `page_token` field of the request for a subsequent call. 
*/ nextPageToken?: string; } /** * Response message for the ListModels method. */ interface Schema$GoogleCloudMlV1__ListModelsResponse { /** * The list of models. */ models?: Schema$GoogleCloudMlV1__Model[]; /** * Optional. Pass this token as the `page_token` field of the request for a subsequent call. */ nextPageToken?: string; } /** * Response message for the ListVersions method. */ interface Schema$GoogleCloudMlV1__ListVersionsResponse { /** * Optional. Pass this token as the `page_token` field of the request for a subsequent call. */ nextPageToken?: string; /** * The list of versions. */ versions?: Schema$GoogleCloudMlV1__Version[]; } interface Schema$GoogleCloudMlV1__Location { /** * Capabilities available in the location. */ capabilities?: Schema$GoogleCloudMlV1__Capability[]; name?: string; } /** * Options for manually scaling a model. */ interface Schema$GoogleCloudMlV1__ManualScaling { /** * The number of nodes to allocate for this model. These nodes are always up, starting from the time the model is deployed, so the cost of operating this model will be proportional to `nodes` * number of hours since last billing cycle plus the cost for each prediction performed. */ nodes?: number; } /** * Represents a machine learning solution. A model can have multiple versions, each of which is a deployed, trained model ready to receive prediction requests. The model itself is just a container. */ interface Schema$GoogleCloudMlV1__Model { /** * Output only. The default version of the model. This version will be used to handle prediction requests that do not specify a version. You can change the default version by calling [projects.methods.versions.setDefault](/ml-engine/reference/rest/v1/projects.models.versions/setDefault). */ defaultVersion?: Schema$GoogleCloudMlV1__Version; /** * Optional. The description specified for the model when it was created. 
*/ description?: string; /** * `etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a model from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform model updates in order to avoid race conditions: An `etag` is returned in the response to `GetModel`, and systems are expected to put that etag in the request to `UpdateModel` to ensure that their change will be applied to the model as intended. */ etag?: string; /** * Optional. One or more labels that you can add, to organize your models. Each label is a key-value pair, where both the key and the value are arbitrary strings that you supply. For more information, see the documentation on &lt;a href=&quot;/ml-engine/docs/tensorflow/resource-labels&quot;&gt;using labels&lt;/a&gt;. */ labels?: { [key: string]: string; }; /** * Required. The name specified for the model when it was created. The model name must be unique within the project it is created in. */ name?: string; /** * Optional. If true, online prediction nodes send `stderr` and `stdout` streams to Stackdriver Logging. These can be more verbose than the standard access logs (see `onlinePredictionLogging`) and can incur higher cost. However, they are helpful for debugging. Note that [Stackdriver logs may incur a cost](/stackdriver/pricing), especially if your project receives prediction requests at a high QPS. Estimate your costs before enabling this option. Default is false. */ onlinePredictionConsoleLogging?: boolean; /** * Optional. If true, online prediction access logs are sent to StackDriver Logging. These logs are like standard server access logs, containing information like timestamp and latency for each request. Note that [Stackdriver logs may incur a cost](/stackdriver/pricing), especially if your project receives prediction requests at a high queries per second rate (QPS). Estimate your costs before enabling this option. 
Default is false. */ onlinePredictionLogging?: boolean; /** * Optional. The list of regions where the model is going to be deployed. Currently only one region per model is supported. Defaults to &#39;us-central1&#39; if nothing is set. See the &lt;a href=&quot;/ml-engine/docs/tensorflow/regions&quot;&gt;available regions&lt;/a&gt; for AI Platform services. Note: * No matter where a model is deployed, it can always be accessed by users from anywhere, both for online and batch prediction. * The region for a batch prediction job is set by the region field when submitting the batch prediction job and does not take its value from this field. */ regions?: string[]; } /** * Represents the metadata of the long-running operation. */ interface Schema$GoogleCloudMlV1__OperationMetadata { /** * The time the operation was submitted. */ createTime?: string; /** * The time operation processing completed. */ endTime?: string; /** * Indicates whether a request to cancel this operation has been made. */ isCancellationRequested?: boolean; /** * The user labels, inherited from the model or the model version being operated on. */ labels?: { [key: string]: string; }; /** * Contains the name of the model associated with the operation. */ modelName?: string; /** * The operation type. */ operationType?: string; /** * Contains the project number associated with the operation. */ projectNumber?: string; /** * The time operation processing started. */ startTime?: string; /** * Contains the version associated with the operation. */ version?: Schema$GoogleCloudMlV1__Version; } /** * Represents a single hyperparameter to optimize. */ interface Schema$GoogleCloudMlV1__ParameterSpec { /** * Required if type is `CATEGORICAL`. The list of possible categories. */ categoricalValues?: string[]; /** * Required if type is `DISCRETE`. A list of feasible points. The list should be in strictly increasing order. For instance, this parameter might have possible settings of 1.5, 2.5, and 4.0. 
This list should not contain more than 1,000 values. */ discreteValues?: number[]; /** * Required if type is `DOUBLE` or `INTEGER`. This field should be unset if type is `CATEGORICAL`. This value should be integers if type is `INTEGER`. */ maxValue?: number; /** * Required if type is `DOUBLE` or `INTEGER`. This field should be unset if type is `CATEGORICAL`. This value should be integers if type is INTEGER. */ minValue?: number; /** * Required. The parameter name must be unique amongst all ParameterConfigs in a HyperparameterSpec message. E.g., &quot;learning_rate&quot;. */ parameterName?: string; /** * Optional. How the parameter should be scaled to the hypercube. Leave unset for categorical parameters. Some kind of scaling is strongly recommended for real or integral parameters (e.g., `UNIT_LINEAR_SCALE`). */ scaleType?: string; /** * Required. The type of the parameter. */ type?: string; } /** * Represents input parameters for a prediction job. */ interface Schema$GoogleCloudMlV1__PredictionInput { /** * Optional. Number of records per batch, defaults to 64. The service will buffer batch_size number of records in memory before invoking one Tensorflow prediction call internally. So take the record size and memory available into consideration when setting this parameter. */ batchSize?: string; /** * Required. The format of the input data files. */ dataFormat?: string; /** * Required. The Cloud Storage location of the input data files. May contain &lt;a href=&quot;/storage/docs/gsutil/addlhelp/WildcardNames&quot;&gt;wildcards&lt;/a&gt;. */ inputPaths?: string[]; /** * Optional. The maximum number of workers to be used for parallel processing. Defaults to 10 if not specified. */ maxWorkerCount?: string; /** * Use this field if you want to use the default version for the specified model. The string must use the following format: `&quot;projects/YOUR_PROJECT/models/YOUR_MODEL&quot;` */ modelName?: string; /** * Optional. 
Format of the output data files, defaults to JSON. */ outputDataFormat?: string; /** * Required. The output Google Cloud Storage location. */ outputPath?: string; /** * Required. The Google Compute Engine region to run the prediction job in. See the &lt;a href=&quot;/ml-engine/docs/tensorflow/regions&quot;&gt;available regions&lt;/a&gt; for AI Platform services. */ region?: string; /** * Optional. The AI Platform runtime version to use for this batch prediction. If not set, AI Platform will pick the runtime version used during the CreateVersion request for this model version, or choose the latest stable version when model version information is not available such as when the model is specified by uri. */ runtimeVersion?: string; /** * Optional. The name of the signature defined in the SavedModel to use for this job. Please refer to [SavedModel](https://tensorflow.github.io/serving/serving_basic.html) for information about how to use signatures. Defaults to [DEFAULT_SERVING_SIGNATURE_DEF_KEY](https://www.tensorflow.org/api_docs/python/tf/saved_model/signature_constants) , which is &quot;serving_default&quot;. */ signatureName?: string; /** * Use this field if you want to specify a Google Cloud Storage path for the model to use. */ uri?: string; /** * Use this field if you want to specify a version of the model to use. The string is formatted the same way as `model_version`, with the addition of the version information: `&quot;projects/YOUR_PROJECT/models/YOUR_MODEL/versions/YOUR_VERSION&quot;` */ versionName?: string; } /** * Represents results of a prediction job. */ interface Schema$GoogleCloudMlV1__PredictionOutput { /** * The number of data instances which resulted in errors. */ errorCount?: string; /** * Node hours used by the batch prediction job. */ nodeHours?: number; /** * The output Google Cloud Storage location provided at the job creation time. */ outputPath?: string; /** * The number of generated predictions. 
*/ predictionCount?: string; } /** * Request for predictions to be issued against a trained model. */ interface Schema$GoogleCloudMlV1__PredictRequest { /** * Required. The prediction request body. */ httpBody?: Schema$GoogleApi__HttpBody; } /** * Represents the configuration for a replica in a cluster. */ interface Schema$GoogleCloudMlV1__ReplicaConfig { /** * Represents the type and number of accelerators used by the replica. [Learn about restrictions on accelerator configurations for training.](/ml-engine/docs/tensorflow/using-gpus#compute-engine-machine-types-with-gpu) */ acceleratorConfig?: Schema$GoogleCloudMlV1__AcceleratorConfig; /** * The Docker image to run on the replica. This image must be in Container Registry. Learn more about [configuring custom containers](/ml-engine/docs/distributed-training-containers). */ imageUri?: string; /** * TensorFlow version used in the custom container. This field is required if the replica is a TPU worker that uses a custom container. Otherwise, do not specify this field. */ tpuTfVersion?: string; } /** * Request message for the SetDefaultVersion request. */ interface Schema$GoogleCloudMlV1__SetDefaultVersionRequest { } /** * Represents input parameters for a training job. When using the gcloud command to submit your training job, you can specify the input parameters as command-line arguments and/or in a YAML configuration file referenced from the --config command-line argument. For details, see the guide to &lt;a href=&quot;/ml-engine/docs/tensorflow/training-jobs&quot;&gt;submitting a training job&lt;/a&gt;. */ interface Schema$GoogleCloudMlV1__TrainingInput { /** * Optional. Command line arguments to pass to the program. */ args?: string[]; /** * Optional. The set of Hyperparameters to tune. */ hyperparameters?: Schema$GoogleCloudMlV1__HyperparameterSpec; /** * Optional. A Google Cloud Storage path in which to store training outputs and other data needed for training. 
This path is passed to your TensorFlow program as the &#39;--job-dir&#39; command-line argument. The benefit of specifying this field is that Cloud ML validates the path for use in training. */ jobDir?: string; /** * Optional. The configuration for your master worker. You should only set `masterConfig.acceleratorConfig` if `masterType` is set to a Compute Engine machine type. Learn about [restrictions on accelerator configurations for training.](/ml-engine/docs/tensorflow/using-gpus#compute-engine-machine-types-with-gpu) Set `masterConfig.imageUri` only if you build a custom image. Only one of `masterConfig.imageUri` and `runtimeVersion` should be set. Learn more about [configuring custom containers](/ml-engine/docs/distributed-training-containers). */ masterConfig?: Schema$GoogleCloudMlV1__ReplicaConfig; /** * Optional. Specifies the type of virtual machine to use for your training job&#39;s master worker. The following types are supported: &lt;dl&gt; &lt;dt&gt;standard&lt;/dt&gt; &lt;dd&gt; A basic machine configuration suitable for training simple models with small to moderate datasets. &lt;/dd&gt; &lt;dt&gt;large_model&lt;/dt&gt; &lt;dd&gt; A machine with a lot of memory, specially suited for parameter servers when your model is large (having many hidden layers or layers with very large numbers of nodes). &lt;/dd&gt; &lt;dt&gt;complex_model_s&lt;/dt&gt; &lt;dd&gt; A machine suitable for the master and workers of the cluster when your model requires more computation than the standard machine can handle satisfactorily. &lt;/dd&gt; &lt;dt&gt;complex_model_m&lt;/dt&gt; &lt;dd&gt; A machine with roughly twice the number of cores and roughly double the memory of &lt;i&gt;complex_model_s&lt;/i&gt;. &lt;/dd&gt; &lt;dt&gt;complex_model_l&lt;/dt&gt; &lt;dd&gt; A machine with roughly twice the number of cores and roughly double the memory of &lt;i&gt;complex_model_m&lt;/i&gt;. 
&lt;/dd&gt; &lt;dt&gt;standard_gpu&lt;/dt&gt; &lt;dd&gt; A machine equivalent to &lt;i&gt;standard&lt;/i&gt; that also includes a single NVIDIA Tesla K80 GPU. See more about &lt;a href=&quot;/ml-engine/docs/tensorflow/using-gpus&quot;&gt;using GPUs to train your model&lt;/a&gt;. &lt;/dd&gt; &lt;dt&gt;complex_model_m_gpu&lt;/dt&gt; &lt;dd&gt; A machine equivalent to &lt;i&gt;complex_model_m&lt;/i&gt; that also includes four NVIDIA Tesla K80 GPUs. &lt;/dd&gt; &lt;dt&gt;complex_model_l_gpu&lt;/dt&gt; &lt;dd&gt; A machine equivalent to &lt;i&gt;complex_model_l&lt;/i&gt; that also includes eight NVIDIA Tesla K80 GPUs. &lt;/dd&gt; &lt;dt&gt;standard_p100&lt;/dt&gt; &lt;dd&gt; A machine equivalent to &lt;i&gt;standard&lt;/i&gt; that also includes a single NVIDIA Tesla P100 GPU. &lt;/dd&gt; &lt;dt&gt;complex_model_m_p100&lt;/dt&gt; &lt;dd&gt; A machine equivalent to &lt;i&gt;complex_model_m&lt;/i&gt; that also includes four NVIDIA Tesla P100 GPUs. &lt;/dd&gt; &lt;dt&gt;standard_v100&lt;/dt&gt; &lt;dd&gt; A machine equivalent to &lt;i&gt;standard&lt;/i&gt; that also includes a single NVIDIA Tesla V100 GPU. &lt;/dd&gt; &lt;dt&gt;large_model_v100&lt;/dt&gt; &lt;dd&gt; A machine equivalent to &lt;i&gt;large_model&lt;/i&gt; that also includes a single NVIDIA Tesla V100 GPU. &lt;/dd&gt; &lt;dt&gt;complex_model_m_v100&lt;/dt&gt; &lt;dd&gt; A machine equivalent to &lt;i&gt;complex_model_m&lt;/i&gt; that also includes four NVIDIA Tesla V100 GPUs. &lt;/dd&gt; &lt;dt&gt;complex_model_l_v100&lt;/dt&gt; &lt;dd&gt; A machine equivalent to &lt;i&gt;complex_model_l&lt;/i&gt; that also includes eight NVIDIA Tesla V100 GPUs. &lt;/dd&gt; &lt;dt&gt;cloud_tpu&lt;/dt&gt; &lt;dd&gt; A TPU VM including one Cloud TPU. See more about &lt;a href=&quot;/ml-engine/docs/tensorflow/using-tpus&quot;&gt;using TPUs to train your model&lt;/a&gt;. &lt;/dd&gt; &lt;/dl&gt; You may also use certain Compute Engine machine types directly in this field. 
The following types are supported: - `n1-standard-4` - `n1-standard-8` - `n1-standard-16` - `n1-standard-32` - `n1-standard-64` - `n1-standard-96` - `n1-highmem-2` - `n1-highmem-4` - `n1-highmem-8` - `n1-highmem-16` - `n1-highmem-32` - `n1-highmem-64` - `n1-highmem-96` - `n1-highcpu-16` - `n1-highcpu-32` - `n1-highcpu-64` - `n1-highcpu-96` See more about [using Compute Engine machine types](/ml-engine/docs/tensorflow/machine-types#compute-engine-machine-types). You must set this value when `scaleTier` is set to `CUSTOM`.
     */
    masterType?: string;
    /**
     * Optional. The maximum job running time. The default is 7 days.
     */
    maxRunningTime?: string;
    /**
     * Required. The Google Cloud Storage location of the packages with the training program and any additional dependencies. The maximum number of package URIs is 100.
     */
    packageUris?: string[];
    /**
     * Optional. The configuration for parameter servers. You should only set `parameterServerConfig.acceleratorConfig` if `parameterServerConfigType` is set to a Compute Engine machine type. [Learn about restrictions on accelerator configurations for training.](/ml-engine/docs/tensorflow/using-gpus#compute-engine-machine-types-with-gpu) Set `parameterServerConfig.imageUri` only if you build a custom image for your parameter server. If `parameterServerConfig.imageUri` has not been set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom containers](/ml-engine/docs/distributed-training-containers).
     */
    parameterServerConfig?: Schema$GoogleCloudMlV1__ReplicaConfig;
    /**
     * Optional. The number of parameter server replicas to use for the training job. Each replica in the cluster will be of the type specified in `parameter_server_type`. This value can only be used when `scale_tier` is set to `CUSTOM`. If you set this value, you must also set `parameter_server_type`. The default value is zero.
     */
    parameterServerCount?: string;
    /**
     * Optional. Specifies the type of virtual machine to use for your training job's parameter server. The supported values are the same as those described in the entry for `master_type`. This value must be consistent with the category of machine type that `masterType` uses. In other words, both must be AI Platform machine types or both must be Compute Engine machine types. This value must be present when `scaleTier` is set to `CUSTOM` and `parameter_server_count` is greater than zero.
     */
    parameterServerType?: string;
    /**
     * Required. The Python module name to run after installing the packages.
     */
    pythonModule?: string;
    /**
     * Optional. The version of Python used in training. If not set, the default version is '2.7'. Python '3.5' is available when `runtime_version` is set to '1.4' and above. Python '2.7' works with all supported <a href="/ml-engine/docs/runtime-version-list">runtime versions</a>.
     */
    pythonVersion?: string;
    /**
     * Required. The Google Compute Engine region to run the training job in. See the <a href="/ml-engine/docs/tensorflow/regions">available regions</a> for AI Platform services.
     */
    region?: string;
    /**
     * Optional. The AI Platform runtime version to use for training. If not set, AI Platform uses the default stable version, 1.0. For more information, see the <a href="/ml-engine/docs/runtime-version-list">runtime version list</a> and <a href="/ml-engine/docs/versioning">how to manage runtime versions</a>.
     */
    runtimeVersion?: string;
    /**
     * Required. Specifies the machine types, the number of replicas for workers and parameter servers.
     */
    scaleTier?: string;
    /**
     * Optional. The configuration for workers. You should only set `workerConfig.acceleratorConfig` if `workerType` is set to a Compute Engine machine type. [Learn about restrictions on accelerator configurations for training.](/ml-engine/docs/tensorflow/using-gpus#compute-engine-machine-types-with-gpu) Set `workerConfig.imageUri` only if you build a custom image for your worker. If `workerConfig.imageUri` has not been set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom containers](/ml-engine/docs/distributed-training-containers).
     */
    workerConfig?: Schema$GoogleCloudMlV1__ReplicaConfig;
    /**
     * Optional. The number of worker replicas to use for the training job. Each replica in the cluster will be of the type specified in `worker_type`. This value can only be used when `scale_tier` is set to `CUSTOM`. If you set this value, you must also set `worker_type`. The default value is zero.
     */
    workerCount?: string;
    /**
     * Optional. Specifies the type of virtual machine to use for your training job's worker nodes. The supported values are the same as those described in the entry for `masterType`. This value must be consistent with the category of machine type that `masterType` uses. In other words, both must be AI Platform machine types or both must be Compute Engine machine types. If you use `cloud_tpu` for this value, see special instructions for [configuring a custom TPU machine](/ml-engine/docs/tensorflow/using-tpus#configuring_a_custom_tpu_machine). This value must be present when `scaleTier` is set to `CUSTOM` and `workerCount` is greater than zero.
     */
    workerType?: string;
  }
  /**
   * Represents results of a training job. Output only.
   */
  interface Schema$GoogleCloudMlV1__TrainingOutput {
    /**
     * Details related to built-in algorithms jobs. Only set for built-in algorithms jobs.
     */
    builtInAlgorithmOutput?: Schema$GoogleCloudMlV1__BuiltInAlgorithmOutput;
    /**
     * The number of hyperparameter tuning trials that completed successfully. Only set for hyperparameter tuning jobs.
     */
    completedTrialCount?: string;
    /**
     * The amount of ML units consumed by the job.
     */
    consumedMLUnits?: number;
    /**
     * The TensorFlow summary tag name used for optimizing hyperparameter tuning trials. See [`HyperparameterSpec.hyperparameterMetricTag`](#HyperparameterSpec.FIELDS.hyperparameter_metric_tag) for more information. Only set for hyperparameter tuning jobs.
     */
    hyperparameterMetricTag?: string;
    /**
     * Whether this job is a built-in Algorithm job.
     */
    isBuiltInAlgorithmJob?: boolean;
    /**
     * Whether this job is a hyperparameter tuning job.
     */
    isHyperparameterTuningJob?: boolean;
    /**
     * Results for individual Hyperparameter trials. Only set for hyperparameter tuning jobs.
     */
    trials?: Schema$GoogleCloudMlV1__HyperparameterOutput[];
  }
  /**
   * Represents a version of the model. Each version is a trained model deployed in the cloud, ready to handle prediction requests. A model can have multiple versions. You can get information about all of the versions of a given model by calling [projects.models.versions.list](/ml-engine/reference/rest/v1/projects.models.versions/list).
   */
  interface Schema$GoogleCloudMlV1__Version {
    /**
     * Automatically scale the number of nodes used to serve the model in response to increases and decreases in traffic. Care should be taken to ramp up traffic according to the model's ability to scale or you will start seeing increases in latency and 429 response codes.
     */
    autoScaling?: Schema$GoogleCloudMlV1__AutoScaling;
    /**
     * Output only. The time the version was created.
     */
    createTime?: string;
    /**
     * Required. The Cloud Storage location of the trained model used to create the version. See the [guide to model deployment](/ml-engine/docs/tensorflow/deploying-models) for more information. When passing Version to [projects.models.versions.create](/ml-engine/reference/rest/v1/projects.models.versions/create) the model service uses the specified location as the source of the model. Once deployed, the model version is hosted by the prediction service, so this location is useful only as a historical record. The total number of model files can't exceed 1000.
     */
    deploymentUri?: string;
    /**
     * Optional. The description specified for the version when it was created.
     */
    description?: string;
    /**
     * Output only. The details of a failure or a cancellation.
     */
    errorMessage?: string;
    /**
     * `etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a model from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform model updates in order to avoid race conditions: An `etag` is returned in the response to `GetVersion`, and systems are expected to put that etag in the request to `UpdateVersion` to ensure that their change will be applied to the model as intended.
     */
    etag?: string;
    /**
     * Optional. The machine learning framework AI Platform uses to train this version of the model. Valid values are `TENSORFLOW`, `SCIKIT_LEARN`, `XGBOOST`. If you do not specify a framework, AI Platform will analyze files in the deployment_uri to determine a framework. If you choose `SCIKIT_LEARN` or `XGBOOST`, you must also set the runtime version of the model to 1.4 or greater. Do **not** specify a framework if you're deploying a [custom prediction routine](/ml-engine/docs/tensorflow/custom-prediction-routines).
     */
    framework?: string;
    /**
     * Output only. If true, this version will be used to handle prediction requests that do not specify a version. You can change the default version by calling [projects.methods.versions.setDefault](/ml-engine/reference/rest/v1/projects.models.versions/setDefault).
     */
    isDefault?: boolean;
    /**
     * Optional. One or more labels that you can add, to organize your model versions. Each label is a key-value pair, where both the key and the value are arbitrary strings that you supply. For more information, see the documentation on <a href="/ml-engine/docs/tensorflow/resource-labels">using labels</a>.
     */
    labels?: {
      [key: string]: string;
    };
    /**
     * Output only. The time the version was last used for prediction.
     */
    lastUseTime?: string;
    /**
     * Optional. The type of machine on which to serve the model. Currently only applies to online prediction service. <dl> <dt>mls1-c1-m2</dt> <dd> The <b>default</b> machine type, with 1 core and 2 GB RAM. The deprecated name for this machine type is "mls1-highmem-1". </dd> <dt>mls1-c4-m2</dt> <dd> In <b>Beta</b>. This machine type has 4 cores and 2 GB RAM. The deprecated name for this machine type is "mls1-highcpu-4". </dd> </dl>
     */
    machineType?: string;
    /**
     * Manually select the number of nodes to use for serving the model. You should generally use `auto_scaling` with an appropriate `min_nodes` instead, but this option is available if you want more predictable billing. Beware that latency and error rates will increase if the traffic exceeds that capability of the system to serve it based on the selected number of nodes.
     */
    manualScaling?: Schema$GoogleCloudMlV1__ManualScaling;
    /**
     * Required. The name specified for the version when it was created. The version name must be unique within the model it is created in.
     */
    name?: string;
    /**
     * Optional. Cloud Storage paths (`gs://…`) of packages for [custom prediction routines](/ml-engine/docs/tensorflow/custom-prediction-routines) or [scikit-learn pipelines with custom code](/ml-engine/docs/scikit/exporting-for-prediction#custom-pipeline-code). For a custom prediction routine, one of these packages must contain your Predictor class (see [`predictionClass`](#Version.FIELDS.prediction_class)). Additionally, include any dependencies used by your Predictor or scikit-learn pipeline uses that are not already included in your selected [runtime version](/ml-engine/docs/tensorflow/runtime-version-list). If you specify this field, you must also set [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater.
     */
    packageUris?: string[];
    /**
     * Optional. The fully qualified name (<var>module_name</var>.<var>class_name</var>) of a class that implements the Predictor interface described in this reference field. The module containing this class should be included in a package provided to the [`packageUris` field](#Version.FIELDS.package_uris). Specify this field if and only if you are deploying a [custom prediction routine (beta)](/ml-engine/docs/tensorflow/custom-prediction-routines). If you specify this field, you must set [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater. The following code sample provides the Predictor interface:
     *
     * ```py
     * class Predictor(object):
     *   """Interface for constructing custom predictors."""
     *
     *   def predict(self, instances, **kwargs):
     *     """Performs custom prediction.
     *
     *     Instances are the decoded values from the request. They have already
     *     been deserialized from JSON.
     *
     *     Args:
     *       instances: A list of prediction input instances.
     *       **kwargs: A dictionary of keyword args provided as additional fields
     *         on the predict request body.
     *
     *     Returns:
     *       A list of outputs containing the prediction results. This list must
     *       be JSON serializable.
     *     """
     *     raise NotImplementedError()
     *
     *   @classmethod
     *   def from_path(cls, model_dir):
     *     """Creates an instance of Predictor using the given path.
     *
     *     Loading of the predictor should be done in this method.
     *
     *     Args:
     *       model_dir: The local directory that contains the exported model file
     *         along with any additional files uploaded when creating the version
     *         resource.
     *
     *     Returns:
     *       An instance implementing this Predictor class.
     *     """
     *     raise NotImplementedError()
     * ```
     *
     * Learn more about [the Predictor interface and custom prediction routines](/ml-engine/docs/tensorflow/custom-prediction-routines).
     */
    predictionClass?: string;
    /**
     * Optional. The version of Python used in prediction. If not set, the default version is '2.7'. Python '3.5' is available when `runtime_version` is set to '1.4' and above. Python '2.7' works with all supported runtime versions.
     */
    pythonVersion?: string;
    /**
     * Optional. The AI Platform runtime version to use for this deployment. If not set, AI Platform uses the default stable version, 1.0. For more information, see the [runtime version list](/ml-engine/docs/runtime-version-list) and [how to manage runtime versions](/ml-engine/docs/versioning).
     */
    runtimeVersion?: string;
    /**
     * Optional. Specifies the service account for resource access control.
     */
    serviceAccount?: string;
    /**
     * Output only. The state of a version.
     */
    state?: string;
  }
  /**
   * Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or mor