googleapis
/**
 * Copyright 2019 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
import { OAuth2Client, JWT, Compute, UserRefreshClient } from 'google-auth-library';
import { GoogleConfigurable, MethodOptions, GlobalOptions, BodyResponseCallback, APIRequestContext } from 'googleapis-common';
import { GaxiosPromise } from 'gaxios';

export declare namespace videointelligence_v1beta2 {
  interface Options extends GlobalOptions {
    version: 'v1beta2';
  }
  interface StandardParameters {
    /** V1 error format. */
    '$.xgafv'?: string;
    /** OAuth access token. */
    access_token?: string;
    /** Data format for response. */
    alt?: string;
    /** JSONP */
    callback?: string;
    /** Selector specifying which fields to include in a partial response. */
    fields?: string;
    /** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
    key?: string;
    /** OAuth 2.0 token for the current user. */
    oauth_token?: string;
    /** Returns response with indentations and line breaks. */
    prettyPrint?: boolean;
    /** Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. */
    quotaUser?: string;
    /** Legacy upload protocol for media (e.g. "media", "multipart"). */
    uploadType?: string;
    /** Upload protocol for media (e.g. "raw", "multipart"). */
    upload_protocol?: string;
  }
  /**
   * Cloud Video Intelligence API
   *
   * Detects objects, explicit content, and scene changes in videos. It also specifies the region for annotation and transcribes speech to text. Supports both asynchronous API and streaming API.
   *
   * @example
   * const {google} = require('googleapis');
   * const videointelligence = google.videointelligence('v1beta2');
   *
   * @namespace videointelligence
   * @type {Function}
   * @version v1beta2
   * @variation v1beta2
   * @param {object=} options Options for Videointelligence
   */
  class Videointelligence {
    context: APIRequestContext;
    videos: Resource$Videos;
    constructor(options: GlobalOptions, google?: GoogleConfigurable);
  }
  /**
   * Video annotation progress. Included in the `metadata` field of the `Operation` returned by the `GetOperation` call of the `google::longrunning::Operations` service.
   */
  interface Schema$GoogleCloudVideointelligenceV1beta2_AnnotateVideoProgress {
    /** Progress metadata for all videos specified in `AnnotateVideoRequest`. */
    annotationProgress?: Schema$GoogleCloudVideointelligenceV1beta2_VideoAnnotationProgress[];
  }
  /**
   * Video annotation request.
   */
  interface Schema$GoogleCloudVideointelligenceV1beta2_AnnotateVideoRequest {
    /** Requested video annotation features. */
    features?: string[];
    /** The video data bytes. If unset, the input video(s) should be specified via `input_uri`. If set, `input_uri` should be unset. */
    inputContent?: string;
    /** Input video location. Currently, only [Google Cloud Storage](https://cloud.google.com/storage/) URIs are supported; they must be specified in the following format: `gs://bucket-id/object-id` (other URI formats return google.rpc.Code.INVALID_ARGUMENT). For more information, see [Request URIs](/storage/docs/reference-uris). A video URI may include wildcards in `object-id`, and thus identify multiple videos. Supported wildcards: '*' to match 0 or more characters; '?' to match 1 character. If unset, the input video should be embedded in the request as `input_content`. If set, `input_content` should be unset. */
    inputUri?: string;
    /** Optional cloud region where annotation should take place. Supported cloud regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region is specified, a region will be determined based on the video file's location. */
    locationId?: string;
    /** Optional location where the output (in JSON format) should be stored. Currently, only [Google Cloud Storage](https://cloud.google.com/storage/) URIs are supported; they must be specified in the following format: `gs://bucket-id/object-id` (other URI formats return google.rpc.Code.INVALID_ARGUMENT). For more information, see [Request URIs](/storage/docs/reference-uris). */
    outputUri?: string;
    /** Additional video context and/or feature-specific parameters. */
    videoContext?: Schema$GoogleCloudVideointelligenceV1beta2_VideoContext;
  }
  /**
   * Video annotation response. Included in the `response` field of the `Operation` returned by the `GetOperation` call of the `google::longrunning::Operations` service.
   */
  interface Schema$GoogleCloudVideointelligenceV1beta2_AnnotateVideoResponse {
    /** Annotation results for all videos specified in `AnnotateVideoRequest`. */
    annotationResults?: Schema$GoogleCloudVideointelligenceV1beta2_VideoAnnotationResults[];
  }
  /**
   * Detected entity from video analysis.
   */
  interface Schema$GoogleCloudVideointelligenceV1beta2_Entity {
    /** Textual description, e.g. `Fixed-gear bicycle`. */
    description?: string;
    /** Opaque entity ID. Some IDs may be available in the [Google Knowledge Graph Search API](https://developers.google.com/knowledge-graph/). */
    entityId?: string;
    /** Language code for `description` in BCP-47 format. */
    languageCode?: string;
  }
  /**
   * Explicit content annotation (based on per-frame visual signals only). If no explicit content has been detected in a frame, no annotations are present for that frame.
   */
  interface Schema$GoogleCloudVideointelligenceV1beta2_ExplicitContentAnnotation {
    /** All video frames where explicit content was detected. */
    frames?: Schema$GoogleCloudVideointelligenceV1beta2_ExplicitContentFrame[];
  }
  /**
   * Config for EXPLICIT_CONTENT_DETECTION.
   */
  interface Schema$GoogleCloudVideointelligenceV1beta2_ExplicitContentDetectionConfig {
    /** Model to use for explicit content detection. Supported values: "builtin/stable" (the default if unset) and "builtin/latest". */
    model?: string;
  }
  /**
   * Video frame level annotation results for explicit content.
   */
  interface Schema$GoogleCloudVideointelligenceV1beta2_ExplicitContentFrame {
    /** Likelihood of the pornography content. */
    pornographyLikelihood?: string;
    /** Time-offset, relative to the beginning of the video, corresponding to the video frame for this location. */
    timeOffset?: string;
  }
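  /**
   * Example (sketch): a minimal `AnnotateVideoRequest` body built from the
   * schema above. The bucket and object names are placeholders; the feature
   * names come from the config interfaces in this file. Either `inputUri` or
   * `inputContent` must be set, not both.
   *
   * @example
   * const request: videointelligence_v1beta2.Schema$GoogleCloudVideointelligenceV1beta2_AnnotateVideoRequest = {
   *   inputUri: 'gs://my-bucket/my-video.mp4', // placeholder Cloud Storage URI
   *   features: ['LABEL_DETECTION', 'SHOT_CHANGE_DETECTION'],
   *   locationId: 'us-east1',
   *   outputUri: 'gs://my-bucket/annotations.json', // where the JSON result is written
   * };
   */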
  /**
   * Label annotation.
   */
  interface Schema$GoogleCloudVideointelligenceV1beta2_LabelAnnotation {
    /** Common categories for the detected entity. E.g. when the label is `Terrier`, the category is likely `dog`. In some cases there might be more than one category, e.g. `Terrier` could also be a `pet`. */
    categoryEntities?: Schema$GoogleCloudVideointelligenceV1beta2_Entity[];
    /** Detected entity. */
    entity?: Schema$GoogleCloudVideointelligenceV1beta2_Entity;
    /** All video frames where a label was detected. */
    frames?: Schema$GoogleCloudVideointelligenceV1beta2_LabelFrame[];
    /** All video segments where a label was detected. */
    segments?: Schema$GoogleCloudVideointelligenceV1beta2_LabelSegment[];
  }
  /**
   * Config for LABEL_DETECTION.
   */
  interface Schema$GoogleCloudVideointelligenceV1beta2_LabelDetectionConfig {
    /** The confidence threshold used to filter labels from frame-level detection. If not set, it defaults to 0.4. The valid range for this threshold is [0.1, 0.9]; any value outside this range will be clipped. Note: for best results, keep the default threshold. We will update the default threshold every time we release a new model. */
    frameConfidenceThreshold?: number;
    /** What labels should be detected with LABEL_DETECTION, in addition to video-level labels or segment-level labels. If unspecified, defaults to `SHOT_MODE`. */
    labelDetectionMode?: string;
    /** Model to use for label detection. Supported values: "builtin/stable" (the default if unset) and "builtin/latest". */
    model?: string;
    /** Whether the video has been shot from a stationary (i.e. non-moving) camera. When set to true, might improve detection accuracy for moving objects. Should be used with `SHOT_AND_FRAME_MODE` enabled. */
    stationaryCamera?: boolean;
    /** The confidence threshold used to filter labels from video-level and shot-level detections. If not set, it defaults to 0.3. The valid range for this threshold is [0.1, 0.9]; any value outside this range will be clipped. Note: for best results, keep the default threshold. We will update the default threshold every time we release a new model. */
    videoConfidenceThreshold?: number;
  }
  /**
   * Video frame level annotation results for label detection.
   */
  interface Schema$GoogleCloudVideointelligenceV1beta2_LabelFrame {
    /** Confidence that the label is accurate. Range: [0, 1]. */
    confidence?: number;
    /** Time-offset, relative to the beginning of the video, corresponding to the video frame for this location. */
    timeOffset?: string;
  }
  /**
   * Video segment level annotation results for label detection.
   */
  interface Schema$GoogleCloudVideointelligenceV1beta2_LabelSegment {
    /** Confidence that the label is accurate. Range: [0, 1]. */
    confidence?: number;
    /** Video segment where a label was detected. */
    segment?: Schema$GoogleCloudVideointelligenceV1beta2_VideoSegment;
  }
  /**
   * Normalized bounding box. The normalized vertex coordinates are relative to the original image. Range: [0, 1].
   */
  interface Schema$GoogleCloudVideointelligenceV1beta2_NormalizedBoundingBox {
    /** Bottom Y coordinate. */
    bottom?: number;
    /** Left X coordinate. */
    left?: number;
    /** Right X coordinate. */
    right?: number;
    /** Top Y coordinate. */
    top?: number;
  }
  /**
   * Normalized bounding polygon for text (that might not be aligned with axis). Contains the list of corner points in clockwise order starting from the top-left corner. For example, for a rectangular bounding box, when the text is horizontal it might look like:
   *
   *     0----1
   *     |    |
   *     3----2
   *
   * When it's clockwise-rotated 180 degrees around the top-left corner it becomes:
   *
   *     2----3
   *     |    |
   *     1----0
   *
   * and the vertex order will still be (0, 1, 2, 3). Note that values can be less than 0, or greater than 1, due to trigonometric calculations for the location of the box.
   */
  interface Schema$GoogleCloudVideointelligenceV1beta2_NormalizedBoundingPoly {
    /** Normalized vertices of the bounding polygon. */
    vertices?: Schema$GoogleCloudVideointelligenceV1beta2_NormalizedVertex[];
  }
  /**
   * A vertex represents a 2D point in the image. NOTE: the normalized vertex coordinates are relative to the original image and range from 0 to 1.
   */
  interface Schema$GoogleCloudVideointelligenceV1beta2_NormalizedVertex {
    /** X coordinate. */
    x?: number;
    /** Y coordinate. */
    y?: number;
  }
  /**
   * Annotations corresponding to one tracked object.
   */
  interface Schema$GoogleCloudVideointelligenceV1beta2_ObjectTrackingAnnotation {
    /** Object category's labeling confidence of this track. */
    confidence?: number;
    /** Entity to specify the object category that this track is labeled as. */
    entity?: Schema$GoogleCloudVideointelligenceV1beta2_Entity;
    /** Information corresponding to all frames where this object track appears. Non-streaming batch mode: there may be one or multiple ObjectTrackingFrame messages in frames. Streaming mode: there can only be one ObjectTrackingFrame message in frames. */
    frames?: Schema$GoogleCloudVideointelligenceV1beta2_ObjectTrackingFrame[];
    /** Non-streaming batch mode ONLY. Each object track corresponds to one video segment where it appears. */
    segment?: Schema$GoogleCloudVideointelligenceV1beta2_VideoSegment;
    /** Streaming mode ONLY. In streaming mode, we do not know the end time of a tracked object before it is completed, so no VideoSegment info is returned. Instead, we provide a unique, identifiable integer `track_id` so that customers can correlate the results of the ongoing ObjectTrackAnnotation with the same `track_id` over time. */
    trackId?: string;
  }
  /**
   * Config for OBJECT_TRACKING.
   */
  interface Schema$GoogleCloudVideointelligenceV1beta2_ObjectTrackingConfig {
    /** Model to use for object tracking. Supported values: "builtin/stable" (the default if unset) and "builtin/latest". */
    model?: string;
  }
  /**
   * Video frame level annotations for object detection and tracking. This field stores the per-frame location, time offset, and confidence.
   */
  interface Schema$GoogleCloudVideointelligenceV1beta2_ObjectTrackingFrame {
    /** The normalized bounding box location of this object track for the frame. */
    normalizedBoundingBox?: Schema$GoogleCloudVideointelligenceV1beta2_NormalizedBoundingBox;
    /** The timestamp of the frame in microseconds. */
    timeOffset?: string;
  }
  /**
   * Config for SHOT_CHANGE_DETECTION.
   */
  interface Schema$GoogleCloudVideointelligenceV1beta2_ShotChangeDetectionConfig {
    /** Model to use for shot change detection. Supported values: "builtin/stable" (the default if unset) and "builtin/latest". */
    model?: string;
  }
  /**
   * Provides "hints" to the speech recognizer to favor specific words and phrases in the results.
   */
  interface Schema$GoogleCloudVideointelligenceV1beta2_SpeechContext {
    /** *Optional* A list of strings containing word and phrase "hints" so that the speech recognition is more likely to recognize them. This can be used to improve the accuracy of specific words and phrases, for example, if specific commands are typically spoken by the user. This can also be used to add additional words to the vocabulary of the recognizer. See [usage limits](https://cloud.google.com/speech/limits#content). */
    phrases?: string[];
  }
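  /**
   * Example (sketch): converting a `NormalizedBoundingBox` (defined above)
   * back into pixel coordinates. The frame width and height are assumptions
   * supplied by the caller; the box coordinates are normalized to [0, 1].
   *
   * @example
   * function toPixels(
   *   box: videointelligence_v1beta2.Schema$GoogleCloudVideointelligenceV1beta2_NormalizedBoundingBox,
   *   frameWidth: number,
   *   frameHeight: number
   * ) {
   *   // Missing coordinates default to 0, matching the normalized origin.
   *   return {
   *     left: (box.left ?? 0) * frameWidth,
   *     top: (box.top ?? 0) * frameHeight,
   *     right: (box.right ?? 0) * frameWidth,
   *     bottom: (box.bottom ?? 0) * frameHeight,
   *   };
   * }
   */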
  /**
   * Alternative hypotheses (a.k.a. n-best list).
   */
  interface Schema$GoogleCloudVideointelligenceV1beta2_SpeechRecognitionAlternative {
    /** Output only. The confidence estimate between 0.0 and 1.0. A higher number indicates an estimated greater likelihood that the recognized words are correct. This field is set only for the top alternative. This field is not guaranteed to be accurate and users should not rely on it to always be provided. The default of 0.0 is a sentinel value indicating `confidence` was not set. */
    confidence?: number;
    /** Transcript text representing the words that the user spoke. */
    transcript?: string;
    /** Output only. A list of word-specific information for each recognized word. Note: When `enable_speaker_diarization` is true, you will see all the words from the beginning of the audio. */
    words?: Schema$GoogleCloudVideointelligenceV1beta2_WordInfo[];
  }
  /**
   * A speech recognition result corresponding to a portion of the audio.
   */
  interface Schema$GoogleCloudVideointelligenceV1beta2_SpeechTranscription {
    /** May contain one or more recognition hypotheses (up to the maximum specified in `max_alternatives`). These alternatives are ordered in terms of accuracy, with the top (first) alternative being the most probable, as ranked by the recognizer. */
    alternatives?: Schema$GoogleCloudVideointelligenceV1beta2_SpeechRecognitionAlternative[];
    /** Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the language in this result. This language code was detected as the most likely to be spoken in the audio. */
    languageCode?: string;
  }
  /**
   * Config for SPEECH_TRANSCRIPTION.
   */
  interface Schema$GoogleCloudVideointelligenceV1beta2_SpeechTranscriptionConfig {
    /** *Optional* For file formats, such as MXF or MKV, that support multiple audio tracks, specify up to two tracks. Default: track 0. */
    audioTracks?: number[];
    /** *Optional* If set, specifies the estimated number of speakers in the conversation. If not set, defaults to '2'. Ignored unless enable_speaker_diarization is set to true. */
    diarizationSpeakerCount?: number;
    /** *Optional* If 'true', adds punctuation to recognition result hypotheses. This feature is only available in select languages; setting it for requests in other languages has no effect. The default 'false' value does not add punctuation to result hypotheses. NOTE: "This is currently offered as an experimental service, complimentary to all users. In the future this may be exclusively available as a premium feature." */
    enableAutomaticPunctuation?: boolean;
    /** *Optional* If 'true', enables speaker detection for each recognized word in the top alternative of the recognition result, using a speaker_tag provided in the WordInfo. Note: When this is true, we send all the words from the beginning of the audio for the top alternative in every consecutive response. This is done to improve our speaker tags, as our models learn to identify the speakers in the conversation over time. */
    enableSpeakerDiarization?: boolean;
    /** *Optional* If `true`, the top result includes a list of words and the confidence for those words. If `false`, no word-level confidence information is returned. The default is `false`. */
    enableWordConfidence?: boolean;
    /** *Optional* If set to `true`, the server will attempt to filter out profanities, replacing all but the initial character in each filtered word with asterisks, e.g. "f***". If set to `false` or omitted, profanities won't be filtered out. */
    filterProfanity?: boolean;
    /** *Required* The language of the supplied audio as a [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. Example: "en-US". See [Language Support](https://cloud.google.com/speech/docs/languages) for a list of the currently supported language codes. */
    languageCode?: string;
    /** *Optional* Maximum number of recognition hypotheses to be returned. Specifically, the maximum number of `SpeechRecognitionAlternative` messages within each `SpeechTranscription`. The server may return fewer than `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of one. If omitted, will return a maximum of one. */
    maxAlternatives?: number;
    /** *Optional* A means to provide context to assist the speech recognition. */
    speechContexts?: Schema$GoogleCloudVideointelligenceV1beta2_SpeechContext[];
  }
  /**
   * Annotations related to one detected OCR text snippet. This will contain the corresponding text, confidence value, and frame level information for each detection.
   */
  interface Schema$GoogleCloudVideointelligenceV1beta2_TextAnnotation {
    /** All video segments where OCR detected text appears. */
    segments?: Schema$GoogleCloudVideointelligenceV1beta2_TextSegment[];
    /** The detected text. */
    text?: string;
  }
  /**
   * Config for TEXT_DETECTION.
   */
  interface Schema$GoogleCloudVideointelligenceV1beta2_TextDetectionConfig {
    /** Language hints can be specified if the language to be detected is known a priori; they can increase the accuracy of the detection. A language hint must be a language code in BCP-47 format. Automatic language detection is performed if no hint is provided. */
    languageHints?: string[];
    /** Model to use for text detection. Supported values: "builtin/stable" (the default if unset) and "builtin/latest". */
    model?: string;
  }
  /**
   * Video frame level annotation results for text annotation (OCR). Contains information regarding the timestamp and bounding box locations for the frames containing detected OCR text snippets.
   */
  interface Schema$GoogleCloudVideointelligenceV1beta2_TextFrame {
    /** Bounding polygon of the detected text for this frame. */
    rotatedBoundingBox?: Schema$GoogleCloudVideointelligenceV1beta2_NormalizedBoundingPoly;
    /** Timestamp of this frame. */
    timeOffset?: string;
  }
  /**
   * Video segment level annotation results for text detection.
   */
  interface Schema$GoogleCloudVideointelligenceV1beta2_TextSegment {
    /** Confidence for the track of detected text. It is calculated as the highest confidence over all frames where OCR detected text appears. */
    confidence?: number;
    /** Information related to the frames where OCR detected text appears. */
    frames?: Schema$GoogleCloudVideointelligenceV1beta2_TextFrame[];
    /** Video segment where a text snippet was detected. */
    segment?: Schema$GoogleCloudVideointelligenceV1beta2_VideoSegment;
  }
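  /**
   * Example (sketch): a `SpeechTranscriptionConfig` (defined above) enabling
   * punctuation and word-level confidence. `languageCode` is the only required
   * field; the other values shown are illustrative choices.
   *
   * @example
   * const speechConfig: videointelligence_v1beta2.Schema$GoogleCloudVideointelligenceV1beta2_SpeechTranscriptionConfig = {
   *   languageCode: 'en-US',            // required, BCP-47 tag
   *   enableAutomaticPunctuation: true, // experimental, select languages only
   *   enableWordConfidence: true,       // include per-word confidence
   *   maxAlternatives: 1,               // top hypothesis only
   * };
   */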
  /**
   * Annotation progress for a single video.
   */
  interface Schema$GoogleCloudVideointelligenceV1beta2_VideoAnnotationProgress {
    /** Specifies which feature is being tracked if the request contains more than one feature. */
    feature?: string;
    /** Video file location in [Google Cloud Storage](https://cloud.google.com/storage/). */
    inputUri?: string;
    /** Approximate percentage processed thus far. Guaranteed to be 100 when fully processed. */
    progressPercent?: number;
    /** Specifies which segment is being tracked if the request contains more than one segment. */
    segment?: Schema$GoogleCloudVideointelligenceV1beta2_VideoSegment;
    /** Time when the request was received. */
    startTime?: string;
    /** Time of the most recent update. */
    updateTime?: string;
  }
  /**
   * Annotation results for a single video.
   */
  interface Schema$GoogleCloudVideointelligenceV1beta2_VideoAnnotationResults {
    /** If set, indicates an error. Note that for a single `AnnotateVideoRequest` some videos may succeed and some may fail. */
    error?: Schema$GoogleRpc_Status;
    /** Explicit content annotation. */
    explicitAnnotation?: Schema$GoogleCloudVideointelligenceV1beta2_ExplicitContentAnnotation;
    /** Label annotations on frame level. There is exactly one element for each unique label. */
    frameLabelAnnotations?: Schema$GoogleCloudVideointelligenceV1beta2_LabelAnnotation[];
    /** Video file location in [Google Cloud Storage](https://cloud.google.com/storage/). */
    inputUri?: string;
    /** Annotations for the list of objects detected and tracked in the video. */
    objectAnnotations?: Schema$GoogleCloudVideointelligenceV1beta2_ObjectTrackingAnnotation[];
    /** Topical label annotations on video level or user-specified segment level. There is exactly one element for each unique label. */
    segmentLabelAnnotations?: Schema$GoogleCloudVideointelligenceV1beta2_LabelAnnotation[];
    /** Shot annotations. Each shot is represented as a video segment. */
    shotAnnotations?: Schema$GoogleCloudVideointelligenceV1beta2_VideoSegment[];
    /** Topical label annotations on shot level. There is exactly one element for each unique label. */
    shotLabelAnnotations?: Schema$GoogleCloudVideointelligenceV1beta2_LabelAnnotation[];
    /** Speech transcription. */
    speechTranscriptions?: Schema$GoogleCloudVideointelligenceV1beta2_SpeechTranscription[];
    /** OCR text detection and tracking. Annotations for the list of detected text snippets; each has a list of frame information associated with it. */
    textAnnotations?: Schema$GoogleCloudVideointelligenceV1beta2_TextAnnotation[];
  }
  /**
   * Video context and/or feature-specific parameters.
   */
  interface Schema$GoogleCloudVideointelligenceV1beta2_VideoContext {
    /** Config for EXPLICIT_CONTENT_DETECTION. */
    explicitContentDetectionConfig?: Schema$GoogleCloudVideointelligenceV1beta2_ExplicitContentDetectionConfig;
    /** Config for LABEL_DETECTION. */
    labelDetectionConfig?: Schema$GoogleCloudVideointelligenceV1beta2_LabelDetectionConfig;
    /** Config for OBJECT_TRACKING. */
    objectTrackingConfig?: Schema$GoogleCloudVideointelligenceV1beta2_ObjectTrackingConfig;
    /** Video segments to annotate. The segments may overlap and are not required to be contiguous or to span the whole video. If unspecified, each video is treated as a single segment. */
    segments?: Schema$GoogleCloudVideointelligenceV1beta2_VideoSegment[];
    /** Config for SHOT_CHANGE_DETECTION. */
    shotChangeDetectionConfig?: Schema$GoogleCloudVideointelligenceV1beta2_ShotChangeDetectionConfig;
    /** Config for SPEECH_TRANSCRIPTION. */
    speechTranscriptionConfig?: Schema$GoogleCloudVideointelligenceV1beta2_SpeechTranscriptionConfig;
    /** Config for TEXT_DETECTION. */
    textDetectionConfig?: Schema$GoogleCloudVideointelligenceV1beta2_TextDetectionConfig;
  }
  /**
   * Video segment.
   */
  interface Schema$GoogleCloudVideointelligenceV1beta2_VideoSegment {
    /** Time-offset, relative to the beginning of the video, corresponding to the end of the segment (inclusive). */
    endTimeOffset?: string;
    /** Time-offset, relative to the beginning of the video, corresponding to the start of the segment (inclusive). */
    startTimeOffset?: string;
  }
  /**
   * Word-specific information for recognized words. Word information is only included in the response when certain request parameters are set, such as `enable_word_time_offsets`.
   */
  interface Schema$GoogleCloudVideointelligenceV1beta2_WordInfo {
    /** Output only. The confidence estimate between 0.0 and 1.0. A higher number indicates an estimated greater likelihood that the recognized words are correct. This field is set only for the top alternative. This field is not guaranteed to be accurate and users should not rely on it to always be provided. The default of 0.0 is a sentinel value indicating `confidence` was not set. */
    confidence?: number;
    /** Time offset relative to the beginning of the audio, corresponding to the end of the spoken word. This field is only set if `enable_word_time_offsets=true` and only in the top hypothesis. This is an experimental feature and the accuracy of the time offset can vary. */
    endTime?: string;
    /** Output only. A distinct integer value is assigned to every speaker within the audio. This field specifies which of those speakers was detected to have spoken this word. Values range from 1 up to diarization_speaker_count, and this field is only set if speaker diarization is enabled. */
    speakerTag?: number;
    /** Time offset relative to the beginning of the audio, corresponding to the start of the spoken word. This field is only set if `enable_word_time_offsets=true` and only in the top hypothesis. This is an experimental feature and the accuracy of the time offset can vary. */
    startTime?: string;
    /** The word corresponding to this set of information. */
    word?: string;
  }
  /**
   * Video annotation progress. Included in the `metadata` field of the `Operation` returned by the `GetOperation` call of the `google::longrunning::Operations` service.
   */
  interface Schema$GoogleCloudVideointelligenceV1p1beta1_AnnotateVideoProgress {
    /** Progress metadata for all videos specified in `AnnotateVideoRequest`. */
    annotationProgress?: Schema$GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationProgress[];
  }
  /**
   * Video annotation response. Included in the `response` field of the `Operation` returned by the `GetOperation` call of the `google::longrunning::Operations` service.
   */
  interface Schema$GoogleCloudVideointelligenceV1p1beta1_AnnotateVideoResponse {
    /** Annotation results for all videos specified in `AnnotateVideoRequest`. */
    annotationResults?: Schema$GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationResults[];
  }
  /**
   * Detected entity from video analysis.
   */
  interface Schema$GoogleCloudVideointelligenceV1p1beta1_Entity {
    /** Textual description, e.g. `Fixed-gear bicycle`. */
    description?: string;
    /** Opaque entity ID. Some IDs may be available in the [Google Knowledge Graph Search API](https://developers.google.com/knowledge-graph/). */
    entityId?: string;
    /** Language code for `description` in BCP-47 format. */
    languageCode?: string;
  }
  /**
   * Explicit content annotation (based on per-frame visual signals only). If no explicit content has been detected in a frame, no annotations are present for that frame.
   */
  interface Schema$GoogleCloudVideointelligenceV1p1beta1_ExplicitContentAnnotation {
    /** All video frames where explicit content was detected. */
    frames?: Schema$GoogleCloudVideointelligenceV1p1beta1_ExplicitContentFrame[];
  }
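  /**
   * Example (sketch): walking an `AnnotateVideoResponse` using the v1beta2
   * schemas defined above, once the long-running operation has completed.
   * Obtaining the operation result is assumed to have happened elsewhere.
   *
   * @example
   * function logSegmentLabels(
   *   response: videointelligence_v1beta2.Schema$GoogleCloudVideointelligenceV1beta2_AnnotateVideoResponse
   * ) {
   *   // One VideoAnnotationResults entry per input video.
   *   for (const result of response.annotationResults ?? []) {
   *     for (const label of result.segmentLabelAnnotations ?? []) {
   *       for (const segment of label.segments ?? []) {
   *         console.log(label.entity?.description, segment.confidence);
   *       }
   *     }
   *   }
   * }
   */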
  /**
   * Video frame level annotation results for explicit content.
   */
  interface Schema$GoogleCloudVideointelligenceV1p1beta1_ExplicitContentFrame {
    /** Likelihood of the pornography content. */
    pornographyLikelihood?: string;
    /** Time-offset, relative to the beginning of the video, corresponding to the video frame for this location. */
    timeOffset?: string;
  }
  /**
   * Label annotation.
   */
  interface Schema$GoogleCloudVideointelligenceV1p1beta1_LabelAnnotation {
    /** Common categories for the detected entity. E.g. when the label is `Terrier`, the category is likely `dog`. In some cases there might be more than one category, e.g. `Terrier` could also be a `pet`. */
    categoryEntities?: Schema$GoogleCloudVideointelligenceV1p1beta1_Entity[];
    /** Detected entity. */
    entity?: Schema$GoogleCloudVideointelligenceV1p1beta1_Entity;
    /** All video frames where a label was detected. */
    frames?: Schema$GoogleCloudVideointelligenceV1p1beta1_LabelFrame[];
    /** All video segments where a label was detected. */
    segments?: Schema$GoogleCloudVideointelligenceV1p1beta1_LabelSegment[];
  }
  /**
   * Video frame level annotation results for label detection.
   */
  interface Schema$GoogleCloudVideointelligenceV1p1beta1_LabelFrame {
    /** Confidence that the label is accurate. Range: [0, 1]. */
    confidence?: number;
    /** Time-offset, relative to the beginning of the video, corresponding to the video frame for this location. */
    timeOffset?: string;
  }
  /**
   * Video segment level annotation results for label detection.
   */
  interface Schema$GoogleCloudVideointelligenceV1p1beta1_LabelSegment {
    /** Confidence that the label is accurate. Range: [0, 1]. */
    confidence?: number;
    /** Video segment where a label was detected. */
    segment?: Schema$GoogleCloudVideointelligenceV1p1beta1_VideoSegment;
  }
  /**
   * Normalized bounding box. The normalized vertex coordinates are relative to the original image. Range: [0, 1].
   */
  interface Schema$GoogleCloudVideointelligenceV1p1beta1_NormalizedBoundingBox {
    /** Bottom Y coordinate. */
    bottom?: number;
    /** Left X coordinate. */
    left?: number;
    /** Right X coordinate. */
    right?: number;
    /** Top Y coordinate. */
    top?: number;
  }
  /**
   * Normalized bounding polygon for text (that might not be aligned with axis). Contains the list of corner points in clockwise order starting from the top-left corner. For example, for a rectangular bounding box, when the text is horizontal it might look like:
   *
   *     0----1
   *     |    |
   *     3----2
   *
   * When it's clockwise-rotated 180 degrees around the top-left corner it becomes:
   *
   *     2----3
   *     |    |
   *     1----0
   *
   * and the vertex order will still be (0, 1, 2, 3). Note that values can be less than 0, or greater than 1, due to trigonometric calculations for the location of the box.
   */
  interface Schema$GoogleCloudVideointelligenceV1p1beta1_NormalizedBoundingPoly {
    /** Normalized vertices of the bounding polygon. */
    vertices?: Schema$GoogleCloudVideointelligenceV1p1beta1_NormalizedVertex[];
  }
  /**
   * A vertex represents a 2D point in the image. NOTE: the normalized vertex coordinates are relative to the original image and range from 0 to 1.
   */
  interface Schema$GoogleCloudVideointelligenceV1p1beta1_NormalizedVertex {
    /** X coordinate. */
    x?: number;
    /** Y coordinate. */
    y?: number;
  }
  /**
   * Annotations corresponding to one tracked object.
   */
  interface Schema$GoogleCloudVideointelligenceV1p1beta1_ObjectTrackingAnnotation {
    /** Object category's labeling confidence of this track. */
    confidence?: number;
    /** Entity to specify the object category that this track is labeled as. */
    entity?: Schema$GoogleCloudVideointelligenceV1p1beta1_Entity;
    /** Information corresponding to all frames where this object track appears. Non-streaming batch mode: there may be one or multiple ObjectTrackingFrame messages in frames. Streaming mode: there can only be one ObjectTrackingFrame message in frames. */
    frames?: Schema$GoogleCloudVideointelligenceV1p1beta1_ObjectTrackingFrame[];
    /** Non-streaming batch mode ONLY. Each object track corresponds to one video segment where it appears. */
    segment?: Schema$GoogleCloudVideointelligenceV1p1beta1_VideoSegment;
    /** Streaming mode ONLY. In streaming mode, we do not know the end time of a tracked object before it is completed, so no VideoSegment info is returned. Instead, we provide a unique, identifiable integer `track_id` so that customers can correlate the results of the ongoing ObjectTrackAnnotation with the same `track_id` over time. */
    trackId?: string;
  }
  /**
   * Video frame level annotations for object detection and tracking. This field stores the per-frame location, time offset, and confidence.
   */
  interface Schema$GoogleCloudVideointelligenceV1p1beta1_ObjectTrackingFrame {
    /** The normalized bounding box location of this object track for the frame. */
    normalizedBoundingBox?: Schema$GoogleCloudVideointelligenceV1p1beta1_NormalizedBoundingBox;
    /** The timestamp of the frame in microseconds. */
    timeOffset?: string;
  }
  /**
   * Alternative hypotheses (a.k.a. n-best list).
   */
  interface Schema$GoogleCloudVideointelligenceV1p1beta1_SpeechRecognitionAlternative {
    /** Output only. The confidence estimate between 0.0 and 1.0. A higher number indicates an estimated greater likelihood that the recognized words are correct. This field is set only for the top alternative. This field is not guaranteed to be accurate and users should not rely on it to always be provided. The default of 0.0 is a sentinel value indicating `confidence` was not set. */
    confidence?: number;
    /** Transcript text representing the words that the user spoke. */
    transcript?: string;
    /** Output only. A list of word-specific information for each recognized word. Note: When `enable_speaker_diarization` is true, you will see all the words from the beginning of the audio. */
    words?: Schema$GoogleCloudVideointelligenceV1p1beta1_WordInfo[];
  }
  /**
   * A speech recognition result corresponding to a portion of the audio.
   */
  interface Schema$GoogleCloudVideointelligenceV1p1beta1_SpeechTranscription {
    /** May contain one or more recognition hypotheses (up to the maximum specified in `max_alternatives`). These alternatives are ordered in terms of accuracy, with the top (first) alternative being the most probable, as ranked by the recognizer. */
    alternatives?: Schema$GoogleCloudVideointelligenceV1p1beta1_SpeechRecognitionAlternative[];
    /** Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the language in this result. This language code was detected as the most likely to be spoken in the audio. */
    languageCode?: string;
  }
  /**
   * Annotations related to one detected OCR text snippet. This will contain the corresponding text, confidence value, and frame level information for each detection.
   */
  interface Schema$GoogleCloudVideointelligenceV1p1beta1_TextAnnotation {
    /** All video segments where OCR detected text appears. */
    segments?: Schema$GoogleCloudVideointelligenceV1p1beta1_TextSegment[];
    /** The detected text. */
    text?: string;
  }
  /**
   * Video frame level annotation results for text annotation (OCR). Contains information regarding the timestamp and bounding box locations for the frames containing detected OCR text snippets.
   */
  interface Schema$GoogleCloudVideointelligenceV1p1beta1_TextFrame {
    /** Bounding polygon of the detected text for this frame. */
    rotatedBoundingBox?: Schema$GoogleCloudVideointelligenceV1p1beta1_NormalizedBoundingPoly;
    /** Timestamp of this frame. */
    timeOffset?: string;
  }
  /**
   * Video segment level annotation results for text detection.
   */
  interface Schema$GoogleCloudVideointelligenceV1p1beta1_TextSegment {
    /** Confidence for the track of detected text. It is calculated as the highest confidence over all frames where OCR detected text appears. */
    confidence?: number;
    /** Information related to the frames where OCR detected text appears. */
    frames?: Schema$GoogleCloudVideointelligenceV1p1beta1_TextFrame[];
    /** Video segment where a text snippet was detected. */
    segment?: Schema$GoogleCloudVideointelligenceV1p1beta1_VideoSegment;
  }
  /**
   * Annotation progress for a single video.
   */
  interface Schema$GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationProgress {
    /** Specifies which feature is being tracked if the request contains more than one feature. */
    feature?: string;
    /** Video file location in [Google Cloud Storage](https://cloud.google.com/storage/). */
    inputUri?: string;
    /** Approximate percentage processed thus far. Guaranteed to be 100 when fully processed. */
    progressPercent?: number;
    /** Specifies which segment is being tracked if the request contains more than one segment. */
    segment?: Schema$GoogleCloudVideointelligenceV1p1beta1_VideoSegment;
    /** Time when the request was received. */
    startTime?: string;
    /** Time of the most recent update. */
    updateTime?: string;
  }
  /**
   * Annotation results for a single video.
   */
  interface Schema$GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationResults {
    /** If set, indicates an error. Note that for a single `AnnotateVideoRequest` some videos may succeed and some may fail. */
    error?: Schema$GoogleRpc_Status;
    /** Explicit content annotation. */
    explicitAnnotation?: Schema$GoogleCloudVideointelligenceV1p1beta1_ExplicitContentAnnotation;
    /** Label annotations on frame level. There is exactly one element for each unique label. */
    frameLabelAnnotations?: Schema$GoogleCloudVideointelligenceV1p1beta1_LabelAnnotation[];
    /** Video file location in [Google Cloud Storage](https://cloud.google.com/storage/). */
    inputUri?: string;
    /** Annotations for the list of objects detected and tracked in the video. */
    objectAnnotations?: Schema$GoogleCloudVideointelligenceV1p1beta1_ObjectTrackingAnnotation[];
    /** Topical label annotations on video level or user-specified segment level. There is exactly one element for each unique label. */
    segmentLabelAnnotations?: Schema$GoogleCloudVideointelligenceV1p1beta1_LabelAnnotation[];
    /** Shot annotations. Each shot is represented as a video segment. */
    shotAnnotations?: Schema$GoogleCloudVideointelligenceV1p1beta1_VideoSegment[];
    /** Topical label annotations on shot level. There is exactly one element for each unique label. */
    shotLabelAnnotations?: Schema$GoogleCloudVideointelligenceV1p1beta1_LabelAnnotation[];
    /** Speech transcription. */
    speechTranscriptions?: Schema$GoogleCloudVideointelligenceV1p1beta1_SpeechTranscription[];
    /** OCR text detection and tracking. Annotations for the list of detected text snippets; each has a list of frame information associated with it. */
    textAnnotations?: Schema$GoogleCloudVideointelligenceV1p1beta1_TextAnnotation[];
  }
  /**
   * Video segment.
   */
  interface Schema$GoogleCloudVideointelligenceV1p1beta1_VideoSegment {
    /** Time-offset, relative to the beginning of the video, corresponding to the end of the segment (inclusive). */
    endTimeOffset?: string;
    /** Time-offset, relative to the beginning of the video, corresponding to the start of the segment (inclusive). */
    startTimeOffset?: string;
  }
  /**
   * Word-specific information for recognized words. Word information is only included in the response when certain request parameters are set, such as `enable_word_time_offsets`.
   */
  interface Schema$GoogleCloudVideointelligenceV1p1beta1_WordInfo {
    /** Output only. The confidence estimate between 0.0 and 1.0. A higher number indicates an estimated greater likelihood that the recognized words are correct. This field is set only for the top alternative. This field is not guaranteed to be accurate and users should not rely on it to always be provided. The default of 0.0 is a sentinel value indicating `confidence` was not set. */
    confidence?: number;
    /** Time offset relative to the beginning of the audio, corresponding to the end of the spoken word. This field is only set if `enable_word_time_offsets=true` and only in the top hypothesis. This is an experimental feature and the accuracy of the time offset can vary. */
    endTime?: string;
    /** Output only. A distinct integer value is assigned to every speaker within the audio. This field specifies which of those speakers was detected to have spoken this word. Values range from 1 up to diarization_speaker_count, and this field is only set if speaker diarization is enabled. */
    speakerTag?: number;
    /** Time offset relative to the beginning of the audio, corresponding to the start of the spoken word. This field is only set if `enable_word_time_offsets=true` and only in the top hypothesis. This is an experimental feature and the accuracy of the time offset can vary. */
    startTime?: string;
    /** The word corresponding to this set of information. */
    word?: string;
  }
  /**
   * Video annotation progress. Included in the `metadata` field of the `Operation` returned by the `GetOperation` call of the `google::longrunning::Operations` service.
   */
  interface Schema$GoogleCloudVideointelligenceV1p2beta1_AnnotateVideoProgress {
    /** Progress metadata for all videos specified in `AnnotateVideoRequest`. */
    annotationProgress?: Schema$GoogleCloudVideointelligenceV1p2beta1_VideoAnnotationProgress[];
  }
  /**
   * Video annotation response. Included in the `response` field of the `Operation` returned by the `GetOperation` call of the `google::longrunning::Operations` service.
   */
  interface Schema$GoogleCloudVideointelligenceV1p2beta1_AnnotateVideoResponse {
    /** Annotation results for all videos specified in `AnnotateVideoRequest`. */
    annotationResults?: Schema$GoogleCloudVideointelligenceV1p2beta1_VideoAnnotationResults[];
  }
  /**
   * Detected entity from video analysis.
   */
  interface Schema$GoogleCloudVideointelligenceV1p2beta1_Entity {
    /** Textual description, e.g. `Fixed-gear bicycle`. */
    description?: string;
    /** Opaque entity ID. Some IDs may be available in the [Google Knowledge Graph Search API](https://developers.google.com/knowledge-graph/). */
    entityId?: string;
    /** Language code for `description` in BCP-47 format. */
    languageCode?: string;
  }
  /**
   * Explicit content annotation (based on per-frame visual signals only). If no explicit content has been detected in a frame, no annotations are present for that frame.
   */
  interface Schema$GoogleCloudVideointelligenceV1p2beta1_ExplicitContentAnnotation {
    /** All video frames where explicit content was detected. */
    frames?: Schema$GoogleCloudVideointelligenceV1p2beta1_ExplicitContentFrame[];
  }
  /**
   * Video frame level annotation results for explicit content.
   */
  interface Schema$GoogleCloudVideointelligenceV1p2beta1_ExplicitContentFrame {
    /**