UNPKG

@googleapis/language

Version:
1,187 lines (1,186 loc) 120 kB
/// <reference types="node" /> import { OAuth2Client, JWT, Compute, UserRefreshClient, BaseExternalAccountClient, GaxiosPromise, GoogleConfigurable, MethodOptions, StreamMethodOptions, GlobalOptions, GoogleAuth, BodyResponseCallback, APIRequestContext } from 'googleapis-common'; import { Readable } from 'stream'; export declare namespace language_v1beta2 { export interface Options extends GlobalOptions { version: 'v1beta2'; } interface StandardParameters { /** * Auth client or API Key for the request */ auth?: string | OAuth2Client | JWT | Compute | UserRefreshClient | BaseExternalAccountClient | GoogleAuth; /** * V1 error format. */ '$.xgafv'?: string; /** * OAuth access token. */ access_token?: string; /** * Data format for response. */ alt?: string; /** * JSONP */ callback?: string; /** * Selector specifying which fields to include in a partial response. */ fields?: string; /** * API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */ key?: string; /** * OAuth 2.0 token for the current user. */ oauth_token?: string; /** * Returns response with indentations and line breaks. */ prettyPrint?: boolean; /** * Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */ quotaUser?: string; /** * Legacy upload protocol for media (e.g. "media", "multipart"). */ uploadType?: string; /** * Upload protocol for media (e.g. "raw", "multipart"). */ upload_protocol?: string; } /** * Cloud Natural Language API * * Provides natural language understanding technologies, such as sentiment analysis, entity recognition, entity sentiment analysis, and other text annotations, to developers. 
* * @example * ```js * const {google} = require('googleapis'); * const language = google.language('v1beta2'); * ``` */ export class Language { context: APIRequestContext; documents: Resource$Documents; constructor(options: GlobalOptions, google?: GoogleConfigurable); } /** * The entity analysis request message. */ export interface Schema$AnalyzeEntitiesRequest { /** * Required. Input document. */ document?: Schema$Document; /** * The encoding type used by the API to calculate offsets. */ encodingType?: string | null; } /** * The entity analysis response message. */ export interface Schema$AnalyzeEntitiesResponse { /** * The recognized entities in the input document. */ entities?: Schema$Entity[]; /** * The language of the text, which will be the same as the language specified in the request or, if not specified, the automatically-detected language. See Document.language field for more details. */ language?: string | null; } /** * The entity-level sentiment analysis request message. */ export interface Schema$AnalyzeEntitySentimentRequest { /** * Required. Input document. */ document?: Schema$Document; /** * The encoding type used by the API to calculate offsets. */ encodingType?: string | null; } /** * The entity-level sentiment analysis response message. */ export interface Schema$AnalyzeEntitySentimentResponse { /** * The recognized entities in the input document with associated sentiments. */ entities?: Schema$Entity[]; /** * The language of the text, which will be the same as the language specified in the request or, if not specified, the automatically-detected language. See Document.language field for more details. */ language?: string | null; } /** * The sentiment analysis request message. */ export interface Schema$AnalyzeSentimentRequest { /** * Required. Input document. */ document?: Schema$Document; /** * The encoding type used by the API to calculate sentence offsets for the sentence sentiment. 
*/ encodingType?: string | null; } /** * The sentiment analysis response message. */ export interface Schema$AnalyzeSentimentResponse { /** * The overall sentiment of the input document. */ documentSentiment?: Schema$Sentiment; /** * The language of the text, which will be the same as the language specified in the request or, if not specified, the automatically-detected language. See Document.language field for more details. */ language?: string | null; /** * The sentiment for all the sentences in the document. */ sentences?: Schema$Sentence[]; } /** * The syntax analysis request message. */ export interface Schema$AnalyzeSyntaxRequest { /** * Required. Input document. */ document?: Schema$Document; /** * The encoding type used by the API to calculate offsets. */ encodingType?: string | null; } /** * The syntax analysis response message. */ export interface Schema$AnalyzeSyntaxResponse { /** * The language of the text, which will be the same as the language specified in the request or, if not specified, the automatically-detected language. See Document.language field for more details. */ language?: string | null; /** * Sentences in the input document. */ sentences?: Schema$Sentence[]; /** * Tokens, along with their syntactic information, in the input document. */ tokens?: Schema$Token[]; } /** * The request message for the text annotation API, which can perform multiple analysis types (sentiment, entities, and syntax) in one call. */ export interface Schema$AnnotateTextRequest { /** * Required. Input document. */ document?: Schema$Document; /** * The encoding type used by the API to calculate offsets. */ encodingType?: string | null; /** * Required. The enabled features. */ features?: Schema$AnnotateTextRequestFeatures; } /** * All available features for sentiment, syntax, and semantic analysis. Setting each one to true will enable that specific analysis for the input. */ export interface Schema$AnnotateTextRequestFeatures { /** * Optional. 
The model options to use for classification. Defaults to v1 options if not specified. Only used if `classify_text` is set to true. */ classificationModelOptions?: Schema$ClassificationModelOptions; /** * Classify the full document into categories. If this is true, the API will use the default model which classifies into a [predefined taxonomy](https://cloud.google.com/natural-language/docs/categories). */ classifyText?: boolean | null; /** * Extract document-level sentiment. */ extractDocumentSentiment?: boolean | null; /** * Extract entities. */ extractEntities?: boolean | null; /** * Extract entities and their associated sentiment. */ extractEntitySentiment?: boolean | null; /** * Extract syntax information. */ extractSyntax?: boolean | null; /** * Moderate the document for harmful and sensitive categories. */ moderateText?: boolean | null; } /** * The text annotations response message. */ export interface Schema$AnnotateTextResponse { /** * Categories identified in the input document. */ categories?: Schema$ClassificationCategory[]; /** * The overall sentiment for the document. Populated if the user enables AnnotateTextRequest.Features.extract_document_sentiment. */ documentSentiment?: Schema$Sentiment; /** * Entities, along with their semantic information, in the input document. Populated if the user enables AnnotateTextRequest.Features.extract_entities. */ entities?: Schema$Entity[]; /** * The language of the text, which will be the same as the language specified in the request or, if not specified, the automatically-detected language. See Document.language field for more details. */ language?: string | null; /** * Harmful and sensitive categories identified in the input document. */ moderationCategories?: Schema$ClassificationCategory[]; /** * Sentences in the input document. Populated if the user enables AnnotateTextRequest.Features.extract_syntax. */ sentences?: Schema$Sentence[]; /** * Tokens, along with their syntactic information, in the input document. 
Populated if the user enables AnnotateTextRequest.Features.extract_syntax. */ tokens?: Schema$Token[]; } /** * Represents a category returned from the text classifier. */ export interface Schema$ClassificationCategory { /** * The classifier's confidence of the category. Number represents how certain the classifier is that this category represents the given text. */ confidence?: number | null; /** * The name of the category representing the document. */ name?: string | null; } /** * Model options available for classification requests. */ export interface Schema$ClassificationModelOptions { /** * Setting this field will use the V1 model and V1 content categories version. The V1 model is a legacy model; support for this will be discontinued in the future. */ v1Model?: Schema$ClassificationModelOptionsV1Model; /** * Setting this field will use the V2 model with the appropriate content categories version. The V2 model is a better performing model. */ v2Model?: Schema$ClassificationModelOptionsV2Model; } /** * Options for the V1 model. */ export interface Schema$ClassificationModelOptionsV1Model { } /** * Options for the V2 model. */ export interface Schema$ClassificationModelOptionsV2Model { /** * The content categories used for classification. */ contentCategoriesVersion?: string | null; } /** * The document classification request message. */ export interface Schema$ClassifyTextRequest { /** * Optional. Model options to use for classification. Defaults to v1 options if not specified. */ classificationModelOptions?: Schema$ClassificationModelOptions; /** * Required. Input document. */ document?: Schema$Document; } /** * The document classification response message. */ export interface Schema$ClassifyTextResponse { /** * Categories representing the input document. */ categories?: Schema$ClassificationCategory[]; } /** * Represents a color in the RGBA color space. 
This representation is designed for simplicity of conversion to and from color representations in various languages over compactness. For example, the fields of this representation can be trivially provided to the constructor of `java.awt.Color` in Java; it can also be trivially provided to UIColor's `+colorWithRed:green:blue:alpha` method in iOS; and, with just a little work, it can be easily formatted into a CSS `rgba()` string in JavaScript. This reference page doesn't have information about the absolute color space that should be used to interpret the RGB value—for example, sRGB, Adobe RGB, DCI-P3, and BT.2020. By default, applications should assume the sRGB color space. When color equality needs to be decided, implementations, unless documented otherwise, treat two colors as equal if all their red, green, blue, and alpha values each differ by at most `1e-5`. Example (Java): import com.google.type.Color; // ... public static java.awt.Color fromProto(Color protocolor) { float alpha = protocolor.hasAlpha() ? protocolor.getAlpha().getValue() : 1.0; return new java.awt.Color( protocolor.getRed(), protocolor.getGreen(), protocolor.getBlue(), alpha); \} public static Color toProto(java.awt.Color color) { float red = (float) color.getRed(); float green = (float) color.getGreen(); float blue = (float) color.getBlue(); float denominator = 255.0; Color.Builder resultBuilder = Color .newBuilder() .setRed(red / denominator) .setGreen(green / denominator) .setBlue(blue / denominator); int alpha = color.getAlpha(); if (alpha != 255) { resultBuilder.setAlpha( FloatValue .newBuilder() .setValue(((float) alpha) / denominator) .build()); \} return resultBuilder.build(); \} // ... Example (iOS / Obj-C): // ... 
static UIColor* fromProto(Color* protocolor) { float red = [protocolor red]; float green = [protocolor green]; float blue = [protocolor blue]; FloatValue* alpha_wrapper = [protocolor alpha]; float alpha = 1.0; if (alpha_wrapper != nil) { alpha = [alpha_wrapper value]; \} return [UIColor colorWithRed:red green:green blue:blue alpha:alpha]; \} static Color* toProto(UIColor* color) { CGFloat red, green, blue, alpha; if (![color getRed:&red green:&green blue:&blue alpha:&alpha]) { return nil; \} Color* result = [[Color alloc] init]; [result setRed:red]; [result setGreen:green]; [result setBlue:blue]; if (alpha <= 0.9999) { [result setAlpha:floatWrapperWithValue(alpha)]; \} [result autorelease]; return result; \} // ... Example (JavaScript): // ... var protoToCssColor = function(rgb_color) { var redFrac = rgb_color.red || 0.0; var greenFrac = rgb_color.green || 0.0; var blueFrac = rgb_color.blue || 0.0; var red = Math.floor(redFrac * 255); var green = Math.floor(greenFrac * 255); var blue = Math.floor(blueFrac * 255); if (!('alpha' in rgb_color)) { return rgbToCssColor(red, green, blue); \} var alphaFrac = rgb_color.alpha.value || 0.0; var rgbParams = [red, green, blue].join(','); return ['rgba(', rgbParams, ',', alphaFrac, ')'].join(''); \}; var rgbToCssColor = function(red, green, blue) { var rgbNumber = new Number((red << 16) | (green << 8) | blue); var hexString = rgbNumber.toString(16); var missingZeros = 6 - hexString.length; var resultBuilder = ['#']; for (var i = 0; i < missingZeros; i++) { resultBuilder.push('0'); \} resultBuilder.push(hexString); return resultBuilder.join(''); \}; // ... */ export interface Schema$Color { /** * The fraction of this color that should be applied to the pixel. That is, the final pixel color is defined by the equation: `pixel color = alpha * (this color) + (1.0 - alpha) * (background color)` This means that a value of 1.0 corresponds to a solid color, whereas a value of 0.0 corresponds to a completely transparent color. 
This uses a wrapper message rather than a simple float scalar so that it is possible to distinguish between a default value and the value being unset. If omitted, this color object is rendered as a solid color (as if the alpha value had been explicitly given a value of 1.0). */ alpha?: number | null; /** * The amount of blue in the color as a value in the interval [0, 1]. */ blue?: number | null; /** * The amount of green in the color as a value in the interval [0, 1]. */ green?: number | null; /** * The amount of red in the color as a value in the interval [0, 1]. */ red?: number | null; } /** * Metric for billing reports. */ export interface Schema$CpuMetric { /** * Required. Number of CPU cores. */ coreNumber?: string | null; /** * Required. Total seconds of core usage, e.g. 4. */ coreSec?: string | null; /** * Required. Type of cpu, e.g. N2. */ cpuType?: string | null; /** * Required. Machine spec, e.g. N1_STANDARD_4. */ machineSpec?: string | null; /** * Billing tracking labels. They do not contain any user data but only the labels set by Vertex Core Infra itself. Tracking labels' keys are defined with special format: goog-[\p{Ll\}\p{N\}]+ E.g. "key": "goog-k8s-cluster-name","value": "us-east1-b4rk" */ trackingLabels?: { [key: string]: string; } | null; } /** * Represents dependency parse tree information for a token. */ export interface Schema$DependencyEdge { /** * Represents the head of this token in the dependency tree. This is the index of the token which has an arc going to this token. The index is the position of the token in the array of tokens returned by the API method. If this token is a root token, then the `head_token_index` is its own index. */ headTokenIndex?: number | null; /** * The parse label for the token. */ label?: string | null; } export interface Schema$DiskMetric { /** * Required. Type of Disk, e.g. REGIONAL_SSD. */ diskType?: string | null; /** * Required. Seconds of physical disk usage, e.g. 3600. 
*/ gibSec?: string | null; } /** * Represents the input to API methods. */ export interface Schema$Document { /** * Indicates how detected boilerplate(e.g. advertisements, copyright declarations, banners) should be handled for this document. If not specified, boilerplate will be treated the same as content. */ boilerplateHandling?: string | null; /** * The content of the input in string format. Cloud audit logging exempt since it is based on user data. */ content?: string | null; /** * The Google Cloud Storage URI where the file content is located. This URI must be of the form: gs://bucket_name/object_name. For more details, see https://cloud.google.com/storage/docs/reference-uris. NOTE: Cloud Storage object versioning is not supported. */ gcsContentUri?: string | null; /** * The language of the document (if not specified, the language is automatically detected). Both ISO and BCP-47 language codes are accepted. [Language Support](https://cloud.google.com/natural-language/docs/languages) lists currently supported languages for each API method. If the language (either specified by the caller or automatically detected) is not supported by the called API method, an `INVALID_ARGUMENT` error is returned. */ language?: string | null; /** * The web URI where the document comes from. This URI is not used for fetching the content, but as a hint for analyzing the document. */ referenceWebUri?: string | null; /** * Required. If the type is not set or is `TYPE_UNSPECIFIED`, returns an `INVALID_ARGUMENT` error. */ type?: string | null; } /** * Represents a phrase in the text that is a known entity, such as a person, an organization, or location. The API associates information, such as salience and mentions, with entities. */ export interface Schema$Entity { /** * The mentions of this entity in the input document. The API currently supports proper noun mentions. */ mentions?: Schema$EntityMention[]; /** * Metadata associated with the entity. 
For most entity types, the metadata is a Wikipedia URL (`wikipedia_url`) and Knowledge Graph MID (`mid`), if they are available. For the metadata associated with other entity types, see the Type table below. */ metadata?: { [key: string]: string; } | null; /** * The representative name for the entity. */ name?: string | null; /** * The salience score associated with the entity in the [0, 1.0] range. The salience score for an entity provides information about the importance or centrality of that entity to the entire document text. Scores closer to 0 are less salient, while scores closer to 1.0 are highly salient. */ salience?: number | null; /** * For calls to AnalyzeEntitySentiment or if AnnotateTextRequest.Features.extract_entity_sentiment is set to true, this field will contain the aggregate sentiment expressed for this entity in the provided document. */ sentiment?: Schema$Sentiment; /** * The entity type. */ type?: string | null; } /** * Represents a mention for an entity in the text. Currently, proper noun mentions are supported. */ export interface Schema$EntityMention { /** * For calls to AnalyzeEntitySentiment or if AnnotateTextRequest.Features.extract_entity_sentiment is set to true, this field will contain the sentiment expressed for this mention of the entity in the provided document. */ sentiment?: Schema$Sentiment; /** * The mention text. */ text?: Schema$TextSpan; /** * The type of the entity mention. */ type?: string | null; } export interface Schema$GpuMetric { /** * Required. Seconds of GPU usage, e.g. 3600. */ gpuSec?: string | null; /** * Required. Type of GPU, e.g. NVIDIA_TESLA_V100. */ gpuType?: string | null; /** * Required. Machine spec, e.g. N1_STANDARD_4. */ machineSpec?: string | null; /** * Billing tracking labels. They do not contain any user data but only the labels set by Vertex Core Infra itself. Tracking labels' keys are defined with special format: goog-[\p{Ll\}\p{N\}]+ E.g. 
"key": "goog-k8s-cluster-name","value": "us-east1-b4rk" */ trackingLabels?: { [key: string]: string; } | null; } /** * LINT: LEGACY_NAMES Infra Usage of billing metrics. */ export interface Schema$InfraUsage { /** * Aggregated core metrics since requested start_time. */ cpuMetrics?: Schema$CpuMetric[]; /** * Aggregated persistent disk metrics since requested start_time. */ diskMetrics?: Schema$DiskMetric[]; /** * Aggregated gpu metrics since requested start_time. */ gpuMetrics?: Schema$GpuMetric[]; /** * Aggregated ram metrics since requested start_time. */ ramMetrics?: Schema$RamMetric[]; /** * Aggregated tpu metrics since requested start_time. */ tpuMetrics?: Schema$TpuMetric[]; } /** * The document moderation request message. */ export interface Schema$ModerateTextRequest { /** * Required. Input document. */ document?: Schema$Document; } /** * The document moderation response message. */ export interface Schema$ModerateTextResponse { /** * Harmful and sensitive categories representing the input document. */ moderationCategories?: Schema$ClassificationCategory[]; } /** * Represents part of speech information for a token. */ export interface Schema$PartOfSpeech { /** * The grammatical aspect. */ aspect?: string | null; /** * The grammatical case. */ case?: string | null; /** * The grammatical form. */ form?: string | null; /** * The grammatical gender. */ gender?: string | null; /** * The grammatical mood. */ mood?: string | null; /** * The grammatical number. */ number?: string | null; /** * The grammatical person. */ person?: string | null; /** * The grammatical properness. */ proper?: string | null; /** * The grammatical reciprocity. */ reciprocity?: string | null; /** * The part of speech tag. */ tag?: string | null; /** * The grammatical tense. */ tense?: string | null; /** * The grammatical voice. */ voice?: string | null; } export interface Schema$RamMetric { /** * Required. VM memory in Gigabyte second, e.g. 3600. 
Using int64 type to match billing metrics definition. */ gibSec?: string | null; /** * Required. Machine spec, e.g. N1_STANDARD_4. */ machineSpec?: string | null; /** * Required. VM memory in gb. */ memories?: number | null; /** * Required. Type of ram. */ ramType?: string | null; /** * Billing tracking labels. They do not contain any user data but only the labels set by Vertex Core Infra itself. Tracking labels' keys are defined with special format: goog-[\p{Ll\}\p{N\}]+ E.g. "key": "goog-k8s-cluster-name","value": "us-east1-b4rk" */ trackingLabels?: { [key: string]: string; } | null; } /** * Represents a sentence in the input document. */ export interface Schema$Sentence { /** * For calls to AnalyzeSentiment or if AnnotateTextRequest.Features.extract_document_sentiment is set to true, this field will contain the sentiment for the sentence. */ sentiment?: Schema$Sentiment; /** * The sentence text. */ text?: Schema$TextSpan; } /** * Represents the feeling associated with the entire text or entities in the text. */ export interface Schema$Sentiment { /** * A non-negative number in the [0, +inf] range, which represents the absolute magnitude of sentiment regardless of score (positive or negative). */ magnitude?: number | null; /** * Sentiment score between -1.0 (negative sentiment) and 1.0 (positive sentiment). */ score?: number | null; } /** * The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). */ export interface Schema$Status { /** * The status code, which should be an enum value of google.rpc.Code. */ code?: number | null; /** * A list of messages that carry the error details. 
There is a common set of message types for APIs to use. */ details?: Array<{ [key: string]: any; }> | null; /** * A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. */ message?: string | null; } /** * Represents a text span in the input document. */ export interface Schema$TextSpan { /** * The API calculates the beginning offset of the content in the original document according to the EncodingType specified in the API request. */ beginOffset?: number | null; /** * The content of the text span, which is a substring of the document. */ content?: string | null; } /** * Represents the smallest syntactic building block of the text. */ export interface Schema$Token { /** * Dependency tree parse for this token. */ dependencyEdge?: Schema$DependencyEdge; /** * [Lemma](https://en.wikipedia.org/wiki/Lemma_%28morphology%29) of the token. */ lemma?: string | null; /** * Parts of speech tag for this token. */ partOfSpeech?: Schema$PartOfSpeech; /** * The token text. */ text?: Schema$TextSpan; } export interface Schema$TpuMetric { /** * Required. Seconds of TPU usage, e.g. 3600. */ tpuSec?: string | null; /** * Required. Type of TPU, e.g. TPU_V2, TPU_V3_POD. */ tpuType?: string | null; } /** * The data statistics of a series of ARRAY values. */ export interface Schema$XPSArrayStats { commonStats?: Schema$XPSCommonStats; /** * Stats of all the values of all arrays, as if they were a single long series of data. The type depends on the element type of the array. */ memberStats?: Schema$XPSDataStats; } export interface Schema$XPSBatchPredictResponse { /** * Examples for batch prediction result. Under full API implementation, results are stored in shared RecordIO of AnnotatedExample protobufs, the annotations field of which is populated by XPS backend. 
*/ exampleSet?: Schema$XPSExampleSet; } /** * Bounding box matching model metrics for a single intersection-over-union threshold and multiple label match confidence thresholds. */ export interface Schema$XPSBoundingBoxMetricsEntry { /** * Metrics for each label-match confidence_threshold from 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99. */ confidenceMetricsEntries?: Schema$XPSBoundingBoxMetricsEntryConfidenceMetricsEntry[]; /** * The intersection-over-union threshold value used to compute this metrics entry. */ iouThreshold?: number | null; /** * The mean average precision. */ meanAveragePrecision?: number | null; } /** * Metrics for a single confidence threshold. */ export interface Schema$XPSBoundingBoxMetricsEntryConfidenceMetricsEntry { /** * The confidence threshold value used to compute the metrics. */ confidenceThreshold?: number | null; /** * The harmonic mean of recall and precision. */ f1Score?: number | null; /** * Precision for the given confidence threshold. */ precision?: number | null; /** * Recall for the given confidence threshold. */ recall?: number | null; } /** * The data statistics of a series of CATEGORY values. */ export interface Schema$XPSCategoryStats { commonStats?: Schema$XPSCommonStats; /** * The statistics of the top 20 CATEGORY values, ordered by CategoryStats.SingleCategoryStats.count. */ topCategoryStats?: Schema$XPSCategoryStatsSingleCategoryStats[]; } /** * The statistics of a single CATEGORY value. */ export interface Schema$XPSCategoryStatsSingleCategoryStats { /** * The number of occurrences of this value in the series. */ count?: string | null; /** * The CATEGORY value. */ value?: string | null; } /** * Model evaluation metrics for classification problems. It can be used for image and video classification. Next tag: 9. */ export interface Schema$XPSClassificationEvaluationMetrics { /** * The Area under precision recall curve metric. */ auPrc?: number | null; /** * The Area Under Receiver Operating Characteristic curve metric. 
Micro-averaged for the overall evaluation. */ auRoc?: number | null; /** * The Area under precision recall curve metric based on priors. */ baseAuPrc?: number | null; /** * Metrics that have confidence thresholds. Precision-recall curve can be derived from it. */ confidenceMetricsEntries?: Schema$XPSConfidenceMetricsEntry[]; /** * Confusion matrix of the evaluation. Only set for MULTICLASS classification problems where number of annotation specs is no more than 10. Only set for model level evaluation, not for evaluation per label. */ confusionMatrix?: Schema$XPSConfusionMatrix; /** * The number of examples used for model evaluation. */ evaluatedExamplesCount?: number | null; /** * The Log Loss metric. */ logLoss?: number | null; } /** * Map from color to display name. Will only be used by Image Segmentation for uCAIP. */ export interface Schema$XPSColorMap { /** * Should be used during training. */ annotationSpecIdToken?: string | null; /** * This type is deprecated in favor of the IntColor below. This is because google.type.Color represent color has a float which semantically does not reflect discrete classes/categories concept. Moreover, to handle it well we need to have some tolerance when converting to a discretized color. As such, the recommendation is to have API surface still use google.type.Color while internally IntColor is used. */ color?: Schema$Color; /** * Should be used during preprocessing. */ displayName?: string | null; intColor?: Schema$XPSColorMapIntColor; } /** * RGB color and each channel is represented by an integer. */ export interface Schema$XPSColorMapIntColor { /** * The value should be in range of [0, 255]. */ blue?: number | null; /** * The value should be in range of [0, 255]. */ green?: number | null; /** * The value should be in range of [0, 255]. */ red?: number | null; } export interface Schema$XPSColumnSpec { /** * The unique id of the column. 
When Preprocess, the Tables BE will populate the order id of the column, which reflects the order of the column inside the table, i.e. 0 means the first column in the table, N-1 means the last column. AutoML BE will persist this order id in Spanner and set the order id here when calling RefreshTablesStats and Train. Note: it's different than the column_spec_id that is generated in AutoML BE. */ columnId?: number | null; /** * The data stats of the column. It's output in RefreshTablesStats and a required input for Train. */ dataStats?: Schema$XPSDataStats; /** * The data type of the column. It's output in Preprocess rpc and a required input for RefreshTablesStats and Train. */ dataType?: Schema$XPSDataType; /** * The display name of the column. It's output in Preprocess and a required input for RefreshTablesStats and Train. */ displayName?: string | null; forecastingMetadata?: Schema$XPSColumnSpecForecastingMetadata; /** * It's output in RefreshTablesStats, and a required input in Train. */ topCorrelatedColumns?: Schema$XPSColumnSpecCorrelatedColumn[]; } /** * Identifies a table's column, and its correlation with the column this ColumnSpec describes. */ export interface Schema$XPSColumnSpecCorrelatedColumn { columnId?: number | null; correlationStats?: Schema$XPSCorrelationStats; } export interface Schema$XPSColumnSpecForecastingMetadata { /** * The type of the column for FORECASTING model training purposes. */ columnType?: string | null; } /** * Common statistics for a column with a specified data type. */ export interface Schema$XPSCommonStats { distinctValueCount?: string | null; nullValueCount?: string | null; validValueCount?: string | null; } /** * ConfidenceMetricsEntry includes generic precision, recall, f1 score etc. Next tag: 16. */ export interface Schema$XPSConfidenceMetricsEntry { /** * Metrics are computed with an assumption that the model never returns predictions with score lower than this value. 
*/ confidenceThreshold?: number | null; /** * The harmonic mean of recall and precision. */ f1Score?: number | null; /** * The harmonic mean of recall_at1 and precision_at1. */ f1ScoreAt1?: number | null; /** * The number of ground truth labels that are not matched by a model created label. */ falseNegativeCount?: string | null; /** * The number of model created labels that do not match a ground truth label. */ falsePositiveCount?: string | null; /** * False Positive Rate for the given confidence threshold. */ falsePositiveRate?: number | null; /** * The False Positive Rate when only considering the label that has the highest prediction score and not below the confidence threshold for each example. */ falsePositiveRateAt1?: number | null; /** * Metrics are computed with an assumption that the model always returns at most this many predictions (ordered by their score, descendingly), but they all still need to meet the confidence_threshold. */ positionThreshold?: number | null; /** * Precision for the given confidence threshold. */ precision?: number | null; /** * The precision when only considering the label that has the highest prediction score and not below the confidence threshold for each example. */ precisionAt1?: number | null; /** * Recall (true positive rate) for the given confidence threshold. */ recall?: number | null; /** * The recall (true positive rate) when only considering the label that has the highest prediction score and not below the confidence threshold for each example. */ recallAt1?: number | null; /** * The number of labels that were not created by the model, but if they would, they would not match a ground truth label. */ trueNegativeCount?: string | null; /** * The number of model created labels that match a ground truth label. */ truePositiveCount?: string | null; } /** * Confusion matrix of the model running the classification. 
*/ export interface Schema$XPSConfusionMatrix { /** * For the following three repeated fields, only one is intended to be set. annotation_spec_id_token is preferable to be set. ID tokens of the annotation specs used in the confusion matrix. */ annotationSpecIdToken?: string[] | null; /** * Category (mainly for segmentation). Set only for image segmentation models. Note: uCAIP Image Segmentation should use annotation_spec_id_token. */ category?: number[] | null; /** * Rows in the confusion matrix. The number of rows is equal to the size of `annotation_spec_id_token`. `row[i].value[j]` is the number of examples that have ground truth of the `annotation_spec_id_token[i]` and are predicted as `annotation_spec_id_token[j]` by the model being evaluated. */ row?: Schema$XPSConfusionMatrixRow[]; /** * Sentiment labels used in the confusion matrix. Set only for text sentiment models. For AutoML Text Revamp, use `annotation_spec_id_token` instead and leave this field empty. */ sentimentLabel?: number[] | null; } /** * A row in the confusion matrix. */ export interface Schema$XPSConfusionMatrixRow { /** * Same as above except intended to represent other counts (e.g. for segmentation this is pixel count). NOTE(params): Only example_count or count is set (oneof does not support repeated fields unless they are embedded inside another message). */ count?: string[] | null; /** * Value of the specific cell in the confusion matrix. The number of values each row has (i.e. the length of the row) is equal to the length of the annotation_spec_id_token field. */ exampleCount?: number[] | null; } /** * A model format used for iOS mobile devices. */ export interface Schema$XPSCoreMlFormat { } /** * Correlation statistics between two series of DataType values. The series may have differing DataType-s, but within a single series the DataType must be the same. */ export interface Schema$XPSCorrelationStats { /** * The correlation value using the Cramer's V measure.
*/ cramersV?: number | null; } /** * Different types of errors and the stats associated with each error. */ export interface Schema$XPSDataErrors { /** * Number of records having errors associated with the enum. */ count?: number | null; /** * Type of the error. */ errorType?: string | null; } /** * The data statistics of a series of values that share the same DataType. */ export interface Schema$XPSDataStats { /** * The statistics for ARRAY DataType. */ arrayStats?: Schema$XPSArrayStats; /** * The statistics for CATEGORY DataType. */ categoryStats?: Schema$XPSCategoryStats; /** * The number of distinct values. */ distinctValueCount?: string | null; /** * The statistics for FLOAT64 DataType. */ float64Stats?: Schema$XPSFloat64Stats; /** * The number of values that are null. */ nullValueCount?: string | null; /** * The statistics for STRING DataType. */ stringStats?: Schema$XPSStringStats; /** * The statistics for STRUCT DataType. */ structStats?: Schema$XPSStructStats; /** * The statistics for TIMESTAMP DataType. */ timestampStats?: Schema$XPSTimestampStats; /** * The number of values that are valid. */ validValueCount?: string | null; } /** * Indicates the type of data that can be stored in a structured data entity (e.g. a table). */ export interface Schema$XPSDataType { /** * The highly compatible data types to this data type. */ compatibleDataTypes?: Schema$XPSDataType[]; /** * If type_code == ARRAY, then `list_element_type` is the type of the elements. */ listElementType?: Schema$XPSDataType; /** * If true, this DataType can also be `null`. */ nullable?: boolean | null; /** * If type_code == STRUCT, then `struct_type` provides type information for the struct's fields. */ structType?: Schema$XPSStructType; /** * If type_code == TIMESTAMP then `time_format` provides the format in which that time field is expressed. The time_format must be written in `strftime` syntax. If time_format is not set, then the default format as described on the field is used.
*/ timeFormat?: string | null; /** * Required. The TypeCode for this type. */ typeCode?: string | null; } /** * A model format used for Docker containers. Use the params field to customize the container. The container is verified to work correctly on the Ubuntu 16.04 operating system. */ export interface Schema$XPSDockerFormat { /** * Optional. Additional CPU information describing the requirements for the model files to be exported. */ cpuArchitecture?: string | null; /** * Optional. Additional GPU information describing the requirements for the model files to be exported. */ gpuArchitecture?: string | null; } /** * A model format used for [Edge TPU](https://cloud.google.com/edge-tpu/) devices. */ export interface Schema$XPSEdgeTpuTfLiteFormat { } /** * Contains xPS-specific model evaluation metrics either for a single annotation spec (label), or for the model overall. Next tag: 18. */ export interface Schema$XPSEvaluationMetrics { /** * The annotation_spec for which this evaluation metrics instance had been created. Empty iff this is an overall model evaluation (like Tables evaluation metrics), i.e. aggregated across all labels. The value comes from the input annotations in AnnotatedExample. For MVP product or for text sentiment models where annotation_spec_id_token is not available, set label instead. */ annotationSpecIdToken?: string | null; /** * The integer category label for which this evaluation metric instance had been created. Valid categories are 0 or higher. Overall model evaluation should set this to negative values (rather than implicit zero). Only used for Image Segmentation (prefer to set annotation_spec_id_token instead). Note: uCAIP Image Segmentation should use annotation_spec_id_token. */ category?: number | null; /** * The number of examples used to create this evaluation metrics instance.
*/ evaluatedExampleCount?: number | null; imageClassificationEvalMetrics?: Schema$XPSClassificationEvaluationMetrics; imageObjectDetectionEvalMetrics?: Schema$XPSImageObjectDetectionEvaluationMetrics; imageSegmentationEvalMetrics?: Schema$XPSImageSegmentationEvaluationMetrics; /** * The label for which this evaluation metrics instance had been created. Empty iff this is an overall model evaluation (like Tables evaluation metrics), i.e. aggregated across all labels. The label maps to AnnotationSpec.display_name in Public API protos. Only used by MVP implementation and text sentiment FULL implementation. */ label?: string | null; regressionEvalMetrics?: Schema$XPSRegressionEvaluationMetrics; tablesClassificationEvalMetrics?: Schema$XPSClassificationEvaluationMetrics; tablesEvalMetrics?: Schema$XPSTablesEvaluationMetrics; textClassificationEvalMetrics?: Schema$XPSClassificationEvaluationMetrics; textExtractionEvalMetrics?: Schema$XPSTextExtractionEvaluationMetrics; textSentimentEvalMetrics?: Schema$XPSTextSentimentEvaluationMetrics; translationEvalMetrics?: Schema$XPSTranslationEvaluationMetrics; videoActionRecognitionEvalMetrics?: Schema$XPSVideoActionRecognitionEvaluationMetrics; videoClassificationEvalMetrics?: Schema$XPSClassificationEvaluationMetrics; videoObjectTrackingEvalMetrics?: Schema$XPSVideoObjectTrackingEvaluationMetrics; } /** * Specifies location of model evaluation metrics. */ export interface Schema$XPSEvaluationMetricsSet { /** * Inline EvaluationMetrics - should be relatively small. For passing large quantities of exhaustive metrics, use file_spec. */ evaluationMetrics?: Schema$XPSEvaluationMetrics[]; /** * File spec containing evaluation metrics of a model, must point to RecordIO file(s) of intelligence.cloud.automl.xps.EvaluationMetrics messages. */ fileSpec?: Schema$XPSFileSpec; /** * Number of evaluation metrics (usually one per label plus overall). */ numEvaluationMetrics?: string | null; } /** * Set of examples or input sources.
*/ export interface Schema$XPSExampleSet { /** * File spec of the examples or input sources. */ fileSpec?: Schema$XPSFileSpec; /** * Fingerprint of the example set. */ fingerprint?: string | null; /** * Number of examples. */ numExamples?: string | null; /** * Number of input sources. */ numInputSources?: string | null; } export interface Schema$XPSExportModelOutputConfig { coreMlFormat?: Schema$XPSCoreMlFormat; dockerFormat?: Schema$XPSDockerFormat; edgeTpuTfLiteFormat?: Schema$XPSEdgeTpuTfLiteFormat; /** * For any model and format: If true, will additionally export FirebaseExportedModelInfo in a firebase.txt file. */ exportFirebaseAuxiliaryInfo?: boolean | null;