UNPKG

googleapis

Version: (not captured in this extract)
906 lines · 204 kB
/// <reference types="node" /> import { OAuth2Client, JWT, Compute, UserRefreshClient, BaseExternalAccountClient, GaxiosPromise, GoogleConfigurable, MethodOptions, StreamMethodOptions, GlobalOptions, GoogleAuth, BodyResponseCallback, APIRequestContext } from 'googleapis-common'; import { Readable } from 'stream'; export declare namespace ml_v1 { export interface Options extends GlobalOptions { version: 'v1'; } interface StandardParameters { /** * Auth client or API Key for the request */ auth?: string | OAuth2Client | JWT | Compute | UserRefreshClient | BaseExternalAccountClient | GoogleAuth; /** * V1 error format. */ '$.xgafv'?: string; /** * OAuth access token. */ access_token?: string; /** * Data format for response. */ alt?: string; /** * JSONP */ callback?: string; /** * Selector specifying which fields to include in a partial response. */ fields?: string; /** * API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */ key?: string; /** * OAuth 2.0 token for the current user. */ oauth_token?: string; /** * Returns response with indentations and line breaks. */ prettyPrint?: boolean; /** * Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */ quotaUser?: string; /** * Legacy upload protocol for media (e.g. "media", "multipart"). */ uploadType?: string; /** * Upload protocol for media (e.g. "raw", "multipart"). */ upload_protocol?: string; } /** * AI Platform Training &amp; Prediction API * * An API to enable creating and using machine learning models. * * @example * ```js * const {google} = require('googleapis'); * const ml = google.ml('v1'); * ``` */ export class Ml { context: APIRequestContext; projects: Resource$Projects; constructor(options: GlobalOptions, google?: GoogleConfigurable); } /** * Message that represents an arbitrary HTTP body. 
It should only be used for payload formats that can't be represented as JSON, such as raw binary or an HTML page. This message can be used both in streaming and non-streaming API methods in the request as well as the response. It can be used as a top-level request field, which is convenient if one wants to extract parameters from either the URL or HTTP template into the request fields and also want access to the raw HTTP body. Example: message GetResourceRequest { // A unique request id. string request_id = 1; // The raw HTTP body is bound to this field. google.api.HttpBody http_body = 2; \} service ResourceService { rpc GetResource(GetResourceRequest) returns (google.api.HttpBody); rpc UpdateResource(google.api.HttpBody) returns (google.protobuf.Empty); \} Example with streaming methods: service CaldavService { rpc GetCalendar(stream google.api.HttpBody) returns (stream google.api.HttpBody); rpc UpdateCalendar(stream google.api.HttpBody) returns (stream google.api.HttpBody); \} Use of this type only changes how the request and response bodies are handled, all other features will continue to work unchanged. */ export interface Schema$GoogleApi__HttpBody { /** * The HTTP Content-Type header value specifying the content type of the body. */ contentType?: string | null; /** * The HTTP request/response body as raw binary. */ data?: string | null; /** * Application specific response metadata. Must be set in the first response for streaming APIs. */ extensions?: Array<{ [key: string]: any; }> | null; } export interface Schema$GoogleCloudMlV1_AutomatedStoppingConfig_DecayCurveAutomatedStoppingConfig { /** * If true, measurement.elapsed_time is used as the x-axis of each Trials Decay Curve. Otherwise, Measurement.steps will be used as the x-axis. 
*/ useElapsedTime?: boolean | null; } /** * The median automated stopping rule stops a pending trial if the trial's best objective_value is strictly below the median 'performance' of all completed trials reported up to the trial's last measurement. Currently, 'performance' refers to the running average of the objective values reported by the trial in each measurement. */ export interface Schema$GoogleCloudMlV1_AutomatedStoppingConfig_MedianAutomatedStoppingConfig { /** * If true, the median automated stopping rule applies to measurement.use_elapsed_time, which means the elapsed_time field of the current trial's latest measurement is used to compute the median objective value for each completed trial. */ useElapsedTime?: boolean | null; } /** * An observed value of a metric. */ export interface Schema$GoogleCloudMlV1_HyperparameterOutput_HyperparameterMetric { /** * The objective value at this training step. */ objectiveValue?: number | null; /** * The global training step for this metric. */ trainingStep?: string | null; } /** * A message representing a metric in the measurement. */ export interface Schema$GoogleCloudMlV1_Measurement_Metric { /** * Required. Metric name. */ metric?: string | null; /** * Required. The value for this metric. */ value?: number | null; } export interface Schema$GoogleCloudMlV1_StudyConfigParameterSpec_CategoricalValueSpec { /** * Must be specified if type is `CATEGORICAL`. The list of possible categories. */ values?: string[] | null; } export interface Schema$GoogleCloudMlV1_StudyConfigParameterSpec_DiscreteValueSpec { /** * Must be specified if type is `DISCRETE`. A list of feasible points. The list should be in strictly increasing order. For instance, this parameter might have possible settings of 1.5, 2.5, and 4.0. This list should not contain more than 1,000 values. */ values?: number[] | null; } export interface Schema$GoogleCloudMlV1_StudyConfigParameterSpec_DoubleValueSpec { /** * Must be specified if type is `DOUBLE`. 
Maximum value of the parameter. */ maxValue?: number | null; /** * Must be specified if type is `DOUBLE`. Minimum value of the parameter. */ minValue?: number | null; } export interface Schema$GoogleCloudMlV1_StudyConfigParameterSpec_IntegerValueSpec { /** * Must be specified if type is `INTEGER`. Maximum value of the parameter. */ maxValue?: string | null; /** * Must be specified if type is `INTEGER`. Minimum value of the parameter. */ minValue?: string | null; } /** * Represents the spec to match categorical values from parent parameter. */ export interface Schema$GoogleCloudMlV1_StudyConfigParameterSpec_MatchingParentCategoricalValueSpec { /** * Matches values of the parent parameter with type 'CATEGORICAL'. All values must exist in `categorical_value_spec` of parent parameter. */ values?: string[] | null; } /** * Represents the spec to match discrete values from parent parameter. */ export interface Schema$GoogleCloudMlV1_StudyConfigParameterSpec_MatchingParentDiscreteValueSpec { /** * Matches values of the parent parameter with type 'DISCRETE'. All values must exist in `discrete_value_spec` of parent parameter. */ values?: number[] | null; } /** * Represents the spec to match integer values from parent parameter. */ export interface Schema$GoogleCloudMlV1_StudyConfigParameterSpec_MatchingParentIntValueSpec { /** * Matches values of the parent parameter with type 'INTEGER'. All values must lie in `integer_value_spec` of parent parameter. */ values?: string[] | null; } /** * Represents a metric to optimize. */ export interface Schema$GoogleCloudMlV1_StudyConfig_MetricSpec { /** * Required. The optimization goal of the metric. */ goal?: string | null; /** * Required. The name of the metric. */ metric?: string | null; } /** * Represents a single parameter to optimize. */ export interface Schema$GoogleCloudMlV1_StudyConfig_ParameterSpec { /** * The value spec for a 'CATEGORICAL' parameter. 
*/ categoricalValueSpec?: Schema$GoogleCloudMlV1_StudyConfigParameterSpec_CategoricalValueSpec; /** * A child node is active if the parameter's value matches the child node's matching_parent_values. If two items in child_parameter_specs have the same name, they must have disjoint matching_parent_values. */ childParameterSpecs?: Schema$GoogleCloudMlV1_StudyConfig_ParameterSpec[]; /** * The value spec for a 'DISCRETE' parameter. */ discreteValueSpec?: Schema$GoogleCloudMlV1_StudyConfigParameterSpec_DiscreteValueSpec; /** * The value spec for a 'DOUBLE' parameter. */ doubleValueSpec?: Schema$GoogleCloudMlV1_StudyConfigParameterSpec_DoubleValueSpec; /** * The value spec for an 'INTEGER' parameter. */ integerValueSpec?: Schema$GoogleCloudMlV1_StudyConfigParameterSpec_IntegerValueSpec; /** * Required. The parameter name must be unique amongst all ParameterSpecs. */ parameter?: string | null; parentCategoricalValues?: Schema$GoogleCloudMlV1_StudyConfigParameterSpec_MatchingParentCategoricalValueSpec; parentDiscreteValues?: Schema$GoogleCloudMlV1_StudyConfigParameterSpec_MatchingParentDiscreteValueSpec; parentIntValues?: Schema$GoogleCloudMlV1_StudyConfigParameterSpec_MatchingParentIntValueSpec; /** * How the parameter should be scaled. Leave unset for categorical parameters. */ scaleType?: string | null; /** * Required. The type of the parameter. */ type?: string | null; } /** * A message representing a parameter to be tuned. Contains the name of the parameter and the suggested value to use for this trial. */ export interface Schema$GoogleCloudMlV1_Trial_Parameter { /** * Must be set if ParameterType is DOUBLE or DISCRETE. */ floatValue?: number | null; /** * Must be set if ParameterType is INTEGER */ intValue?: string | null; /** * The name of the parameter. */ parameter?: string | null; /** * Must be set if ParameterTypeis CATEGORICAL */ stringValue?: string | null; } /** * Represents a hardware accelerator request config. 
Note that the AcceleratorConfig can be used in both Jobs and Versions. Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and [accelerators for online prediction](/ml-engine/docs/machine-types-online-prediction#gpus). */ export interface Schema$GoogleCloudMlV1__AcceleratorConfig { /** * The number of accelerators to attach to each machine running the job. */ count?: string | null; /** * The type of accelerator to use. */ type?: string | null; } /** * The request message for the AddTrialMeasurement service method. */ export interface Schema$GoogleCloudMlV1__AddTrialMeasurementRequest { /** * Required. The measurement to be added to a trial. */ measurement?: Schema$GoogleCloudMlV1__Measurement; } /** * Configuration for Automated Early Stopping of Trials. If no implementation_config is set, automated early stopping will not be run. */ export interface Schema$GoogleCloudMlV1__AutomatedStoppingConfig { decayCurveStoppingConfig?: Schema$GoogleCloudMlV1_AutomatedStoppingConfig_DecayCurveAutomatedStoppingConfig; medianAutomatedStoppingConfig?: Schema$GoogleCloudMlV1_AutomatedStoppingConfig_MedianAutomatedStoppingConfig; } /** * Options for automatically scaling a model. */ export interface Schema$GoogleCloudMlV1__AutoScaling { /** * The maximum number of nodes to scale this model under load. The actual value will depend on resource quota and availability. */ maxNodes?: number | null; /** * MetricSpec contains the specifications to use to calculate the desired nodes count. */ metrics?: Schema$GoogleCloudMlV1__MetricSpec[]; /** * Optional. The minimum number of nodes to allocate for this model. These nodes are always up, starting from the time the model is deployed. Therefore, the cost of operating this model will be at least `rate` * `min_nodes` * number of hours since last billing cycle, where `rate` is the cost per node-hour as documented in the [pricing guide](/ml-engine/docs/pricing), even if no predictions are performed. 
There is additional cost for each prediction performed. Unlike manual scaling, if the load gets too heavy for the nodes that are up, the service will automatically add nodes to handle the increased load as well as scale back as traffic drops, always maintaining at least `min_nodes`. You will be charged for the time in which additional nodes are used. If `min_nodes` is not specified and AutoScaling is used with a [legacy (MLS1) machine type](/ml-engine/docs/machine-types-online-prediction), `min_nodes` defaults to 0, in which case, when traffic to a model stops (and after a cool-down period), nodes will be shut down and no charges will be incurred until traffic to the model resumes. If `min_nodes` is not specified and AutoScaling is used with a [Compute Engine (N1) machine type](/ml-engine/docs/machine-types-online-prediction), `min_nodes` defaults to 1. `min_nodes` must be at least 1 for use with a Compute Engine machine type. You can set `min_nodes` when creating the model version, and you can also update `min_nodes` for an existing version: update_body.json: { 'autoScaling': { 'minNodes': 5 \} \} HTTP request: PATCH https://ml.googleapis.com/v1/{name=projects/x/models/x/versions/x\}?update_mask=autoScaling.minNodes -d @./update_body.json */ minNodes?: number | null; } /** * Represents output related to a built-in algorithm Job. */ export interface Schema$GoogleCloudMlV1__BuiltInAlgorithmOutput { /** * Framework on which the built-in algorithm was trained. */ framework?: string | null; /** * The Cloud Storage path to the `model/` directory where the training job saves the trained model. Only set for successful jobs that don't use hyperparameter tuning. */ modelPath?: string | null; /** * Python version on which the built-in algorithm was trained. */ pythonVersion?: string | null; /** * AI Platform runtime version on which the built-in algorithm was trained. */ runtimeVersion?: string | null; } /** * Request message for the CancelJob method. 
*/ export interface Schema$GoogleCloudMlV1__CancelJobRequest { } export interface Schema$GoogleCloudMlV1__Capability { /** * Available accelerators for the capability. */ availableAccelerators?: string[] | null; type?: string | null; } /** * This message will be placed in the metadata field of a google.longrunning.Operation associated with a CheckTrialEarlyStoppingState request. */ export interface Schema$GoogleCloudMlV1__CheckTrialEarlyStoppingStateMetatdata { /** * The time at which the operation was submitted. */ createTime?: string | null; /** * The name of the study that the trial belongs to. */ study?: string | null; /** * The trial name. */ trial?: string | null; } /** * The request message for the CheckTrialEarlyStoppingState service method. */ export interface Schema$GoogleCloudMlV1__CheckTrialEarlyStoppingStateRequest { } /** * The message will be placed in the response field of a completed google.longrunning.Operation associated with a CheckTrialEarlyStoppingState request. */ export interface Schema$GoogleCloudMlV1__CheckTrialEarlyStoppingStateResponse { /** * The time at which operation processing completed. */ endTime?: string | null; /** * True if the Trial should stop. */ shouldStop?: boolean | null; /** * The time at which the operation was started. */ startTime?: string | null; } /** * The request message for the CompleteTrial service method. */ export interface Schema$GoogleCloudMlV1__CompleteTrialRequest { /** * Optional. If provided, it will be used as the completed trial's final_measurement; Otherwise, the service will auto-select a previously reported measurement as the final-measurement */ finalMeasurement?: Schema$GoogleCloudMlV1__Measurement; /** * Optional. A human readable reason why the trial was infeasible. This should only be provided if `trial_infeasible` is true. */ infeasibleReason?: string | null; /** * Optional. True if the trial cannot be run with the given Parameter, and final_measurement will be ignored. 
*/ trialInfeasible?: boolean | null; } export interface Schema$GoogleCloudMlV1__Config { /** * The service account Cloud ML uses to run on TPU node. */ tpuServiceAccount?: string | null; } /** * Represents a network port in a single container. This message is a subset of the [Kubernetes ContainerPort v1 core specification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#containerport-v1-core). */ export interface Schema$GoogleCloudMlV1__ContainerPort { /** * Number of the port to expose on the container. This must be a valid port number: 0 < PORT_NUMBER < 65536. */ containerPort?: number | null; } /** * Specification of a custom container for serving predictions. This message is a subset of the [Kubernetes Container v1 core specification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#container-v1-core). */ export interface Schema$GoogleCloudMlV1__ContainerSpec { /** * Immutable. Specifies arguments for the command that runs when the container starts. This overrides the container's [`CMD`](https://docs.docker.com/engine/reference/builder/#cmd). Specify this field as an array of executable and arguments, similar to a Docker `CMD`'s "default parameters" form. If you don't specify this field but do specify the command field, then the command from the `command` field runs without any additional arguments. See the [Kubernetes documentation about how the `command` and `args` fields interact with a container's `ENTRYPOINT` and `CMD`](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#notes). If you don't specify this field and don't specify the `commmand` field, then the container's [`ENTRYPOINT`](https://docs.docker.com/engine/reference/builder/#cmd) and `CMD` determine what runs based on their default behavior. See the [Docker documentation about how `CMD` and `ENTRYPOINT` interact](https://docs.docker.com/engine/reference/builder/#understand-how-cmd-and-entrypoint-interact). 
In this field, you can reference [environment variables set by AI Platform Prediction](/ai-platform/prediction/docs/custom-container-requirements#aip-variables) and environment variables set in the env field. You cannot reference environment variables set in the Docker image. In order for environment variables to be expanded, reference them by using the following syntax: $( VARIABLE_NAME) Note that this differs from Bash variable expansion, which does not use parentheses. If a variable cannot be resolved, the reference in the input string is used unchanged. To avoid variable expansion, you can escape this syntax with `$$`; for example: $$(VARIABLE_NAME) This field corresponds to the `args` field of the [Kubernetes Containers v1 core API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#container-v1-core). */ args?: string[] | null; /** * Immutable. Specifies the command that runs when the container starts. This overrides the container's [`ENTRYPOINT`](https://docs.docker.com/engine/reference/builder/#entrypoint). Specify this field as an array of executable and arguments, similar to a Docker `ENTRYPOINT`'s "exec" form, not its "shell" form. If you do not specify this field, then the container's `ENTRYPOINT` runs, in conjunction with the args field or the container's [`CMD`](https://docs.docker.com/engine/reference/builder/#cmd), if either exists. If this field is not specified and the container does not have an `ENTRYPOINT`, then refer to the [Docker documentation about how `CMD` and `ENTRYPOINT` interact](https://docs.docker.com/engine/reference/builder/#understand-how-cmd-and-entrypoint-interact). If you specify this field, then you can also specify the `args` field to provide additional arguments for this command. However, if you specify this field, then the container's `CMD` is ignored. 
See the [Kubernetes documentation about how the `command` and `args` fields interact with a container's `ENTRYPOINT` and `CMD`](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#notes). In this field, you can reference [environment variables set by AI Platform Prediction](/ai-platform/prediction/docs/custom-container-requirements#aip-variables) and environment variables set in the env field. You cannot reference environment variables set in the Docker image. In order for environment variables to be expanded, reference them by using the following syntax: $( VARIABLE_NAME) Note that this differs from Bash variable expansion, which does not use parentheses. If a variable cannot be resolved, the reference in the input string is used unchanged. To avoid variable expansion, you can escape this syntax with `$$`; for example: $$(VARIABLE_NAME) This field corresponds to the `command` field of the [Kubernetes Containers v1 core API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#container-v1-core). */ command?: string[] | null; /** * Immutable. List of environment variables to set in the container. After the container starts running, code running in the container can read these environment variables. Additionally, the command and args fields can reference these variables. Later entries in this list can also reference earlier entries. For example, the following example sets the variable `VAR_2` to have the value `foo bar`: ```json [ { "name": "VAR_1", "value": "foo" \}, { "name": "VAR_2", "value": "$(VAR_1) bar" \} ] ``` If you switch the order of the variables in the example, then the expansion does not occur. This field corresponds to the `env` field of the [Kubernetes Containers v1 core API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#container-v1-core). */ env?: Schema$GoogleCloudMlV1__EnvVar[]; /** * URI of the Docker image to be used as the custom container for serving predictions. 
This URI must identify [an image in Artifact Registry](/artifact-registry/docs/overview) and begin with the hostname `{REGION\}-docker.pkg.dev`, where `{REGION\}` is replaced by the region that matches AI Platform Prediction [regional endpoint](/ai-platform/prediction/docs/regional-endpoints) that you are using. For example, if you are using the `us-central1-ml.googleapis.com` endpoint, then this URI must begin with `us-central1-docker.pkg.dev`. To use a custom container, the [AI Platform Google-managed service account](/ai-platform/prediction/docs/custom-service-account#default) must have permission to pull (read) the Docker image at this URI. The AI Platform Google-managed service account has the following format: `service-{PROJECT_NUMBER\}@cloud-ml.google.com.iam.gserviceaccount.com` {PROJECT_NUMBER\} is replaced by your Google Cloud project number. By default, this service account has necessary permissions to pull an Artifact Registry image in the same Google Cloud project where you are using AI Platform Prediction. In this case, no configuration is necessary. If you want to use an image from a different Google Cloud project, learn how to [grant the Artifact Registry Reader (roles/artifactregistry.reader) role for a repository](/artifact-registry/docs/access-control#grant-repo) to your projet's AI Platform Google-managed service account. To learn about the requirements for the Docker image itself, read [Custom container requirements](/ai-platform/prediction/docs/custom-container-requirements). */ image?: string | null; /** * Immutable. List of ports to expose from the container. AI Platform Prediction sends any prediction requests that it receives to the first port on this list. AI Platform Prediction also sends [liveness and health checks](/ai-platform/prediction/docs/custom-container-requirements#health) to this port. 
If you do not specify this field, it defaults to following value: ```json [ { "containerPort": 8080 \} ] ``` AI Platform Prediction does not use ports other than the first one listed. This field corresponds to the `ports` field of the [Kubernetes Containers v1 core API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#container-v1-core). */ ports?: Schema$GoogleCloudMlV1__ContainerPort[]; } /** * Represents the config of disk options. */ export interface Schema$GoogleCloudMlV1__DiskConfig { /** * Size in GB of the boot disk (default is 100GB). */ bootDiskSizeGb?: number | null; /** * Type of the boot disk (default is "pd-ssd"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive). */ bootDiskType?: string | null; } /** * Represents a custom encryption key configuration that can be applied to a resource. */ export interface Schema$GoogleCloudMlV1__EncryptionConfig { /** * The Cloud KMS resource identifier of the customer-managed encryption key used to protect a resource, such as a training job. It has the following format: `projects/{PROJECT_ID\}/locations/{REGION\}/keyRings/{KEY_RING_NAME\}/cryptoKeys/{KEY_NAME\}` */ kmsKeyName?: string | null; } /** * Represents an environment variable to be made available in a container. This message is a subset of the [Kubernetes EnvVar v1 core specification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#envvar-v1-core). */ export interface Schema$GoogleCloudMlV1__EnvVar { /** * Name of the environment variable. Must be a [valid C identifier](https://github.com/kubernetes/kubernetes/blob/v1.18.8/staging/src/k8s.io/apimachinery/pkg/util/validation/validation.go#L258) and must not begin with the prefix `AIP_`. */ name?: string | null; /** * Value of the environment variable. Defaults to an empty string. 
In this field, you can reference [environment variables set by AI Platform Prediction](/ai-platform/prediction/docs/custom-container-requirements#aip-variables) and environment variables set earlier in the same env field as where this message occurs. You cannot reference environment variables set in the Docker image. In order for environment variables to be expanded, reference them by using the following syntax: $(VARIABLE_NAME) Note that this differs from Bash variable expansion, which does not use parentheses. If a variable cannot be resolved, the reference in the input string is used unchanged. To avoid variable expansion, you can escape this syntax with `$$`; for example: $$(VARIABLE_NAME) */ value?: string | null; } /** * Request for explanations to be issued against a trained model. */ export interface Schema$GoogleCloudMlV1__ExplainRequest { /** * Required. The explanation request body. */ httpBody?: Schema$GoogleApi__HttpBody; } /** * Message holding configuration options for explaining model predictions. There are three feature attribution methods supported for TensorFlow models: integrated gradients, sampled Shapley, and XRAI. [Learn more about feature attributions.](/ai-platform/prediction/docs/ai-explanations/overview) */ export interface Schema$GoogleCloudMlV1__ExplanationConfig { /** * Attributes credit by computing the Aumann-Shapley value taking advantage of the model's fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1703.01365 */ integratedGradientsAttribution?: Schema$GoogleCloudMlV1__IntegratedGradientsAttribution; /** * An attribution method that approximates Shapley values for features that contribute to the label being predicted. A sampling strategy is used to approximate the value rather than considering all subsets of features. 
*/ sampledShapleyAttribution?: Schema$GoogleCloudMlV1__SampledShapleyAttribution; /** * Attributes credit by computing the XRAI taking advantage of the model's fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1906.02825 Currently only implemented for models with natural image inputs. */ xraiAttribution?: Schema$GoogleCloudMlV1__XraiAttribution; } /** * Returns service account information associated with a project. */ export interface Schema$GoogleCloudMlV1__GetConfigResponse { config?: Schema$GoogleCloudMlV1__Config; /** * The service account Cloud ML uses to access resources in the project. */ serviceAccount?: string | null; /** * The project number for `service_account`. */ serviceAccountProject?: string | null; } /** * Represents the result of a single hyperparameter tuning trial from a training job. The TrainingOutput object that is returned on successful completion of a training job with hyperparameter tuning includes a list of HyperparameterOutput objects, one for each successful trial. */ export interface Schema$GoogleCloudMlV1__HyperparameterOutput { /** * All recorded object metrics for this trial. This field is not currently populated. */ allMetrics?: Schema$GoogleCloudMlV1_HyperparameterOutput_HyperparameterMetric[]; /** * Details related to built-in algorithms jobs. Only set for trials of built-in algorithms jobs that have succeeded. */ builtInAlgorithmOutput?: Schema$GoogleCloudMlV1__BuiltInAlgorithmOutput; /** * Output only. End time for the trial. */ endTime?: string | null; /** * The final objective metric seen for this trial. */ finalMetric?: Schema$GoogleCloudMlV1_HyperparameterOutput_HyperparameterMetric; /** * The hyperparameters given to this trial. */ hyperparameters?: { [key: string]: string; } | null; /** * True if the trial is stopped early. */ isTrialStoppedEarly?: boolean | null; /** * Output only. Start time for the trial. */ startTime?: string | null; /** * Output only. 
The detailed state of the trial. */ state?: string | null; /** * The trial id for these results. */ trialId?: string | null; /** * URIs for accessing [interactive shells](https://cloud.google.com/ai-platform/training/docs/monitor-debug-interactive-shell) (one URI for each training node). Only available if this trial is part of a hyperparameter tuning job and the job's training_input.enable_web_access is `true`. The keys are names of each node in the training job; for example, `master-replica-0` for the master node, `worker-replica-0` for the first worker, and `ps-replica-0` for the first parameter server. The values are the URIs for each node's interactive shell. */ webAccessUris?: { [key: string]: string; } | null; } /** * Represents a set of hyperparameters to optimize. */ export interface Schema$GoogleCloudMlV1__HyperparameterSpec { /** * Optional. The search algorithm specified for the hyperparameter tuning job. Uses the default AI Platform hyperparameter tuning algorithm if unspecified. */ algorithm?: string | null; /** * Optional. Indicates if the hyperparameter tuning job enables auto trial early stopping. */ enableTrialEarlyStopping?: boolean | null; /** * Required. The type of goal to use for tuning. Available types are `MAXIMIZE` and `MINIMIZE`. Defaults to `MAXIMIZE`. */ goal?: string | null; /** * Optional. The TensorFlow summary tag name to use for optimizing trials. For current versions of TensorFlow, this tag name should exactly match what is shown in TensorBoard, including all scopes. For versions of TensorFlow prior to 0.12, this should be only the tag passed to tf.Summary. By default, "training/hptuning/metric" will be used. */ hyperparameterMetricTag?: string | null; /** * Optional. The number of failed trials that need to be seen before failing the hyperparameter tuning job. You can specify this field to override the default failing criteria for AI Platform hyperparameter tuning jobs. 
Defaults to zero, which means the service decides when a hyperparameter job should fail. */ maxFailedTrials?: number | null; /** * Optional. The number of training trials to run concurrently. You can reduce the time it takes to perform hyperparameter tuning by adding trials in parallel. However, each trail only benefits from the information gained in completed trials. That means that a trial does not get access to the results of trials running at the same time, which could reduce the quality of the overall optimization. Each trial will use the same scale tier and machine types. Defaults to one. */ maxParallelTrials?: number | null; /** * Optional. How many training trials should be attempted to optimize the specified hyperparameters. Defaults to one. */ maxTrials?: number | null; /** * Required. The set of parameters to tune. */ params?: Schema$GoogleCloudMlV1__ParameterSpec[]; /** * Optional. The prior hyperparameter tuning job id that users hope to continue with. The job id will be used to find the corresponding vizier study guid and resume the study. */ resumePreviousJobId?: string | null; } /** * Attributes credit by computing the Aumann-Shapley value taking advantage of the model's fully differentiable structure. Refer to this paper for more details: https://arxiv.org/abs/1703.01365 */ export interface Schema$GoogleCloudMlV1__IntegratedGradientsAttribution { /** * Number of steps for approximating the path integral. A good value to start is 50 and gradually increase until the sum to diff property is met within the desired error range. */ numIntegralSteps?: number | null; } /** * Represents a training or prediction job. */ export interface Schema$GoogleCloudMlV1__Job { /** * Output only. When the job was created. */ createTime?: string | null; /** * Output only. When the job processing was completed. */ endTime?: string | null; /** * Output only. The details of a failure or a cancellation. 
*/ errorMessage?: string | null; /** * `etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a job from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform job updates in order to avoid race conditions: An `etag` is returned in the response to `GetJob`, and systems are expected to put that etag in the request to `UpdateJob` to ensure that their change will be applied to the same version of the job. */ etag?: string | null; /** * Required. The user-specified id of the job. */ jobId?: string | null; /** * Output only. It only takes effect when the job is in QUEUED state. If it's positive, it indicates the job's position in the job scheduler. It's 0 when the job is already scheduled. */ jobPosition?: string | null; /** * Optional. One or more labels that you can add, to organize your jobs. Each label is a key-value pair, where both the key and the value are arbitrary strings that you supply. For more information, see the documentation on using labels. */ labels?: { [key: string]: string; } | null; /** * Input parameters to create a prediction job. */ predictionInput?: Schema$GoogleCloudMlV1__PredictionInput; /** * The current prediction job result. */ predictionOutput?: Schema$GoogleCloudMlV1__PredictionOutput; /** * Output only. When the job processing was started. */ startTime?: string | null; /** * Output only. The detailed state of a job. */ state?: string | null; /** * Input parameters to create a training job. */ trainingInput?: Schema$GoogleCloudMlV1__TrainingInput; /** * The current training job result. */ trainingOutput?: Schema$GoogleCloudMlV1__TrainingOutput; } /** * Response message for the ListJobs method. */ export interface Schema$GoogleCloudMlV1__ListJobsResponse { /** * The list of jobs. */ jobs?: Schema$GoogleCloudMlV1__Job[]; /** * Optional. Pass this token as the `page_token` field of the request for a subsequent call. 
*/ nextPageToken?: string | null; } export interface Schema$GoogleCloudMlV1__ListLocationsResponse { /** * Locations where at least one type of CMLE capability is available. */ locations?: Schema$GoogleCloudMlV1__Location[]; /** * Optional. Pass this token as the `page_token` field of the request for a subsequent call. */ nextPageToken?: string | null; } /** * Response message for the ListModels method. */ export interface Schema$GoogleCloudMlV1__ListModelsResponse { /** * The list of models. */ models?: Schema$GoogleCloudMlV1__Model[]; /** * Optional. Pass this token as the `page_token` field of the request for a subsequent call. */ nextPageToken?: string | null; } /** * The request message for the ListOptimalTrials service method. */ export interface Schema$GoogleCloudMlV1__ListOptimalTrialsRequest { } /** * The response message for the ListOptimalTrials method. */ export interface Schema$GoogleCloudMlV1__ListOptimalTrialsResponse { /** * The pareto-optimal trials for multiple objective study or the optimal trial for single objective study. The definition of pareto-optimal can be checked in wiki page. https://en.wikipedia.org/wiki/Pareto_efficiency */ trials?: Schema$GoogleCloudMlV1__Trial[]; } export interface Schema$GoogleCloudMlV1__ListStudiesResponse { /** * The studies associated with the project. */ studies?: Schema$GoogleCloudMlV1__Study[]; } /** * The response message for the ListTrials method. */ export interface Schema$GoogleCloudMlV1__ListTrialsResponse { /** * The trials associated with the study. */ trials?: Schema$GoogleCloudMlV1__Trial[]; } /** * Response message for the ListVersions method. */ export interface Schema$GoogleCloudMlV1__ListVersionsResponse { /** * Optional. Pass this token as the `page_token` field of the request for a subsequent call. */ nextPageToken?: string | null; /** * The list of versions. 
*/ versions?: Schema$GoogleCloudMlV1__Version[]; } export interface Schema$GoogleCloudMlV1__Location { /** * Capabilities available in the location. */ capabilities?: Schema$GoogleCloudMlV1__Capability[]; name?: string | null; } /** * Options for manually scaling a model. */ export interface Schema$GoogleCloudMlV1__ManualScaling { /** * The number of nodes to allocate for this model. These nodes are always up, starting from the time the model is deployed, so the cost of operating this model will be proportional to `nodes` * number of hours since last billing cycle plus the cost for each prediction performed. */ nodes?: number | null; } /** * A message representing a measurement. */ export interface Schema$GoogleCloudMlV1__Measurement { /** * Output only. Time that the trial has been running at the point of this measurement. */ elapsedTime?: string | null; /** * Provides a list of metrics that act as inputs into the objective function. */ metrics?: Schema$GoogleCloudMlV1_Measurement_Metric[]; /** * The number of steps a machine learning model has been trained for. Must be non-negative. */ stepCount?: string | null; } /** * MetricSpec contains the specifications to use to calculate the desired nodes count when autoscaling is enabled. */ export interface Schema$GoogleCloudMlV1__MetricSpec { /** * metric name. */ name?: string | null; /** * Target specifies the target value for the given metric; once real metric deviates from the threshold by a certain percentage, the node count changes. */ target?: number | null; } /** * Represents a machine learning solution. A model can have multiple versions, each of which is a deployed, trained model ready to receive prediction requests. The model itself is just a container. */ export interface Schema$GoogleCloudMlV1__Model { /** * Output only. The default version of the model. This version will be used to handle prediction requests that do not specify a version. 
You can change the default version by calling projects.models.versions.setDefault. */ defaultVersion?: Schema$GoogleCloudMlV1__Version; /** * Optional. The description specified for the model when it was created. */ description?: string | null; /** * `etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a model from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform model updates in order to avoid race conditions: An `etag` is returned in the response to `GetModel`, and systems are expected to put that etag in the request to `UpdateModel` to ensure that their change will be applied to the model as intended. */ etag?: string | null; /** * Optional. One or more labels that you can add, to organize your models. Each label is a key-value pair, where both the key and the value are arbitrary strings that you supply. For more information, see the documentation on using labels. Note that this field is not updatable for mls1* models. */ labels?: { [key: string]: string; } | null; /** * Required. The name specified for the model when it was created. The model name must be unique within the project it is created in. */ name?: string | null; /** * Optional. If true, online prediction nodes send `stderr` and `stdout` streams to Cloud Logging. These can be more verbose than the standard access logs (see `onlinePredictionLogging`) and can incur higher cost. However, they are helpful for debugging. Note that [logs may incur a cost](/stackdriver/pricing), especially if your project receives prediction requests at a high QPS. Estimate your costs before enabling this option. Default is false. */ onlinePredictionConsoleLogging?: boolean | null; /** * Optional. If true, online prediction access logs are sent to Cloud Logging. These logs are like standard server access logs, containing information like timestamp and latency for each request. 
Note that [logs may incur a cost](/stackdriver/pricing), especially if your project receives prediction requests at a high queries per second rate (QPS). Estimate your costs before enabling this option. Default is false. */ onlinePredictionLogging?: boolean | null; /** * Optional. The list of regions where the model is going to be deployed. Only one region per model is supported. Defaults to 'us-central1' if nothing is set. See the available regions for AI Platform services. Note: * No matter where a model is deployed, it can always be accessed by users from anywhere, both for online and batch prediction. * The region for a batch prediction job is set by the region field when submitting the batch prediction job and does not take its value from this field. */ regions?: string[] | null; } /** * Represents the metadata of the long-running operation. */ export interface Schema$GoogleCloudMlV1__OperationMetadata { /** * The time the operation was submitted. */ createTime?: string | null; /** * The time operation processing completed. */ endTime?: string | null; /** * Indicates whether a request to cancel this operation has been made. */ isCancellationRequested?: boolean | null; /** * The user labels, inherited from the model or the model version being operated on. */ labels?: { [key: string]: string; } | null; /** * Contains the name of the model associated with the operation. */ modelName?: string | null; /** * The operation type. */ operationType?: string | null; /** * Contains the project number associated with the operation. */ projectNumber?: string | null; /** * The time operation processing started. */ startTime?: string | null; /** * Contains the version associated with the operation. */ version?: Schema$GoogleCloudMlV1__Version; } /** * Represents a single hyperparameter to optimize. */ export interface Schema$GoogleCloudMlV1__ParameterSpec { /** * Required if type is `CATEGORICAL`. The list of possible categories. 
*/ categoricalValues?: string[] | null; /** * Required if type is `DISCRETE`. A list of feasible points. The list should be in strictly increasing order. For instance, this parameter might have possible settings of 1.5, 2.5, and 4.0. This list should not contain more than 1,000 values. */ discreteValues?: number[] | null; /** * Required if type is `DOUBLE` or `INTEGER`. This field should be unset if type is `CATEGORICAL`. This value should be integers if type is `INTEGER`. */ maxValue?: number | null; /** * Required if type is `DOUBLE` or `INTEGER`. This field should be unset if type is `CATEGORICAL`. This value should be integers if type is INTEGER. */ minValue?: number | null; /** * Required. The parameter name must be unique amongst all ParameterConfigs in a HyperparameterSpec message. E.g., "learning_rate". */ parameterName?: string | null; /** * Optional. How the parameter should be scaled to the hypercube. Lea