From 6464f30d8ca8a090bf26b099a9734391010ce162 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Thu, 13 Feb 2020 13:05:37 -0800 Subject: [PATCH] feat(videointelligence): add person detection and face detection (#5) --- google/__init__.py | 2 +- google/cloud/__init__.py | 2 +- google/cloud/videointelligence.py | 2 +- google/cloud/videointelligence_v1/__init__.py | 13 +- .../cloud/videointelligence_v1/gapic/enums.py | 2 +- ...deo_intelligence_service_grpc_transport.py | 2 +- .../video_intelligence_service_client.py | 6 +- .../proto/video_intelligence_pb2.py | 57 +- google/cloud/videointelligence_v1/types.py | 2 +- .../videointelligence_v1beta2/__init__.py | 13 +- .../videointelligence_v1beta2/gapic/enums.py | 2 +- ...deo_intelligence_service_grpc_transport.py | 2 +- .../video_intelligence_service_client.py | 8 +- .../proto/video_intelligence.proto | 30 +- .../proto/video_intelligence_pb2.py | 148 +-- .../cloud/videointelligence_v1beta2/types.py | 2 +- .../videointelligence_v1p1beta1/__init__.py | 13 +- .../gapic/enums.py | 2 +- ...deo_intelligence_service_grpc_transport.py | 2 +- .../video_intelligence_service_client.py | 8 +- .../proto/video_intelligence.proto | 86 +- .../proto/video_intelligence_pb2.py | 220 +++-- .../videointelligence_v1p1beta1/types.py | 2 +- .../videointelligence_v1p2beta1/__init__.py | 13 +- .../gapic/enums.py | 2 +- ...deo_intelligence_service_grpc_transport.py | 2 +- .../video_intelligence_service_client.py | 8 +- .../proto/video_intelligence.proto | 39 +- .../proto/video_intelligence_pb2.py | 184 ++-- .../videointelligence_v1p2beta1/types.py | 2 +- .../videointelligence_v1p3beta1/__init__.py | 13 +- .../gapic/enums.py | 6 +- ...aming_video_intelligence_service_client.py | 2 +- ...deo_intelligence_service_grpc_transport.py | 2 +- ...deo_intelligence_service_grpc_transport.py | 2 +- .../video_intelligence_service_client.py | 2 +- .../proto/video_intelligence.proto | 85 +- .../proto/video_intelligence_pb2.py | 926 ++++++++++++++---- .../videointelligence_v1p3beta1/types.py | 2 +- noxfile.py | 1 + synth.metadata | 8 +- ...st_video_intelligence_service_client_v1.py | 8 +- ...deo_intelligence_service_client_v1beta2.py | 2 +- ...o_intelligence_service_client_v1p1beta1.py | 2 +- ...o_intelligence_service_client_v1p2beta1.py | 2 +- ...o_intelligence_service_client_v1p3beta1.py | 2 +- ...o_intelligence_service_client_v1p3beta1.py | 2 +- 47 files changed, 1372 insertions(+), 571 deletions(-) diff --git a/google/__init__.py b/google/__init__.py index 8fcc60e2..9a1b64a6 100644 --- a/google/__init__.py +++ b/google/__init__.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/__init__.py b/google/cloud/__init__.py index 8fcc60e2..9a1b64a6 100644 --- a/google/cloud/__init__.py +++ b/google/cloud/__init__.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/videointelligence.py b/google/cloud/videointelligence.py index 276a663b..eeac2fa2 100644 --- a/google/cloud/videointelligence.py +++ b/google/cloud/videointelligence.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/videointelligence_v1/__init__.py b/google/cloud/videointelligence_v1/__init__.py index 85cd704a..80ce76c6 100644 --- a/google/cloud/videointelligence_v1/__init__.py +++ b/google/cloud/videointelligence_v1/__init__.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -16,12 +16,23 @@ from __future__ import absolute_import +import sys +import warnings from google.cloud.videointelligence_v1 import types from google.cloud.videointelligence_v1.gapic import enums from google.cloud.videointelligence_v1.gapic import video_intelligence_service_client +if sys.version_info[:2] == (2, 7): + message = ( + "A future version of this library will drop support for Python 2.7." + "More details about Python 2 support for Google Cloud Client Libraries" + "can be found at https://cloud.google.com/python/docs/python2-sunset/" + ) + warnings.warn(message, DeprecationWarning) + + class VideoIntelligenceServiceClient( video_intelligence_service_client.VideoIntelligenceServiceClient ): diff --git a/google/cloud/videointelligence_v1/gapic/enums.py b/google/cloud/videointelligence_v1/gapic/enums.py index 269d8829..09c442d2 100644 --- a/google/cloud/videointelligence_v1/gapic/enums.py +++ b/google/cloud/videointelligence_v1/gapic/enums.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/videointelligence_v1/gapic/transports/video_intelligence_service_grpc_transport.py b/google/cloud/videointelligence_v1/gapic/transports/video_intelligence_service_grpc_transport.py index 0a74daf4..9abef7a2 100644 --- a/google/cloud/videointelligence_v1/gapic/transports/video_intelligence_service_grpc_transport.py +++ b/google/cloud/videointelligence_v1/gapic/transports/video_intelligence_service_grpc_transport.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/videointelligence_v1/gapic/video_intelligence_service_client.py b/google/cloud/videointelligence_v1/gapic/video_intelligence_service_client.py index f872f349..84304d6a 100644 --- a/google/cloud/videointelligence_v1/gapic/video_intelligence_service_client.py +++ b/google/cloud/videointelligence_v1/gapic/video_intelligence_service_client.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -213,9 +213,9 @@ def annotate_video( >>> >>> client = videointelligence_v1.VideoIntelligenceServiceClient() >>> - >>> input_uri = 'gs://cloud-samples-data/video/cat.mp4' >>> features_element = enums.Feature.LABEL_DETECTION >>> features = [features_element] + >>> input_uri = 'gs://cloud-samples-data/video/cat.mp4' >>> >>> response = client.annotate_video(input_uri=input_uri, features=features) >>> @@ -288,9 +288,9 @@ def annotate_video( ) request = video_intelligence_pb2.AnnotateVideoRequest( + features=features, input_uri=input_uri, input_content=input_content, - features=features, video_context=video_context, output_uri=output_uri, location_id=location_id, diff --git a/google/cloud/videointelligence_v1/proto/video_intelligence_pb2.py b/google/cloud/videointelligence_v1/proto/video_intelligence_pb2.py index 869eeae4..2f1b8137 100644 --- a/google/cloud/videointelligence_v1/proto/video_intelligence_pb2.py +++ b/google/cloud/videointelligence_v1/proto/video_intelligence_pb2.py @@ -3513,9 +3513,9 @@ dict( DESCRIPTOR=_EXPLICITCONTENTANNOTATION, __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Explicit content annotation (based on per-frame visual signals only). If - no explicit content has been detected in a frame, no annotations are - present for that frame. + __doc__="""Explicit content annotation (based on per-frame visual + signals only). If no explicit content has been detected in a frame, no + annotations are present for that frame. Attributes: @@ -3533,8 +3533,8 @@ dict( DESCRIPTOR=_NORMALIZEDBOUNDINGBOX, __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Normalized bounding box. The normalized vertex coordinates are relative - to the original image. Range: [0, 1]. + __doc__="""Normalized bounding box. The normalized vertex coordinates + are relative to the original image. Range: [0, 1]. Attributes: @@ -3688,8 +3688,8 @@ dict( DESCRIPTOR=_ANNOTATEVIDEORESPONSE, __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Video annotation response. Included in the ``response`` field of the - ``Operation`` returned by the ``GetOperation`` call of the + __doc__="""Video annotation response. Included in the ``response`` + field of the ``Operation`` returned by the ``GetOperation`` call of the ``google::longrunning::Operations`` service. @@ -3741,8 +3741,8 @@ dict( DESCRIPTOR=_ANNOTATEVIDEOPROGRESS, __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Video annotation progress. Included in the ``metadata`` field of the - ``Operation`` returned by the ``GetOperation`` call of the + __doc__="""Video annotation progress. Included in the ``metadata`` + field of the ``Operation`` returned by the ``GetOperation`` call of the ``google::longrunning::Operations`` service. @@ -3832,8 +3832,8 @@ dict( DESCRIPTOR=_SPEECHCONTEXT, __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Provides "hints" to the speech recognizer to favor specific words and - phrases in the results. + __doc__="""Provides "hints" to the speech recognizer to favor + specific words and phrases in the results. Attributes: @@ -3858,7 +3858,8 @@ dict( DESCRIPTOR=_SPEECHTRANSCRIPTION, __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""A speech recognition result corresponding to a portion of the audio. + __doc__="""A speech recognition result corresponding to a portion of + the audio. 
Attributes: @@ -3916,9 +3917,9 @@ dict( DESCRIPTOR=_WORDINFO, __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Word-specific information for recognized words. Word information is only - included in the response when certain request parameters are set, such - as ``enable_word_time_offsets``. + __doc__="""Word-specific information for recognized words. Word + information is only included in the response when certain request + parameters are set, such as ``enable_word_time_offsets``. Attributes: @@ -3980,10 +3981,11 @@ dict( DESCRIPTOR=_NORMALIZEDBOUNDINGPOLY, __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Normalized bounding polygon for text (that might not be aligned with - axis). Contains list of the corner points in clockwise order starting - from top-left corner. For example, for a rectangular bounding box: When - the text is horizontal it might look like: 0----1 \| \| 3----2 + __doc__="""Normalized bounding polygon for text (that might not be + aligned with axis). Contains list of the corner points in clockwise + order starting from top-left corner. For example, for a rectangular + bounding box: When the text is horizontal it might look like: 0----1 \| + \| 3----2 When it's clockwise rotated 180 degrees around the top-left corner it becomes: 2----3 \| \| 1----0 @@ -4032,9 +4034,9 @@ dict( DESCRIPTOR=_TEXTFRAME, __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Video frame level annotation results for text annotation (OCR). Contains - information regarding timestamp and bounding box locations for the - frames containing detected OCR text snippets. + __doc__="""Video frame level annotation results for text annotation + (OCR). Contains information regarding timestamp and bounding box + locations for the frames containing detected OCR text snippets. Attributes: @@ -4054,9 +4056,9 @@ dict( DESCRIPTOR=_TEXTANNOTATION, __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Annotations related to one detected OCR text snippet. This will contain - the corresponding text, confidence value, and frame level information - for each detection. + __doc__="""Annotations related to one detected OCR text snippet. This + will contain the corresponding text, confidence value, and frame level + information for each detection. Attributes: @@ -4076,8 +4078,9 @@ dict( DESCRIPTOR=_OBJECTTRACKINGFRAME, __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Video frame level annotations for object detection and tracking. This - field stores per frame location, time offset, and confidence. + __doc__="""Video frame level annotations for object detection and + tracking. This field stores per frame location, time offset, and + confidence. Attributes: diff --git a/google/cloud/videointelligence_v1/types.py b/google/cloud/videointelligence_v1/types.py index 6603682e..f224c143 100644 --- a/google/cloud/videointelligence_v1/types.py +++ b/google/cloud/videointelligence_v1/types.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/videointelligence_v1beta2/__init__.py b/google/cloud/videointelligence_v1beta2/__init__.py index c180603f..e6ed7610 100644 --- a/google/cloud/videointelligence_v1beta2/__init__.py +++ b/google/cloud/videointelligence_v1beta2/__init__.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -16,6 +16,8 @@ from __future__ import absolute_import +import sys +import warnings from google.cloud.videointelligence_v1beta2 import types from google.cloud.videointelligence_v1beta2.gapic import enums @@ -24,6 +26,15 @@ ) +if sys.version_info[:2] == (2, 7): + message = ( + "A future version of this library will drop support for Python 2.7." + "More details about Python 2 support for Google Cloud Client Libraries" + "can be found at https://cloud.google.com/python/docs/python2-sunset/" + ) + warnings.warn(message, DeprecationWarning) + + class VideoIntelligenceServiceClient( video_intelligence_service_client.VideoIntelligenceServiceClient ): diff --git a/google/cloud/videointelligence_v1beta2/gapic/enums.py b/google/cloud/videointelligence_v1beta2/gapic/enums.py index 57070f93..3f9e01cf 100644 --- a/google/cloud/videointelligence_v1beta2/gapic/enums.py +++ b/google/cloud/videointelligence_v1beta2/gapic/enums.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/videointelligence_v1beta2/gapic/transports/video_intelligence_service_grpc_transport.py b/google/cloud/videointelligence_v1beta2/gapic/transports/video_intelligence_service_grpc_transport.py index 0da18022..b1744590 100644 --- a/google/cloud/videointelligence_v1beta2/gapic/transports/video_intelligence_service_grpc_transport.py +++ b/google/cloud/videointelligence_v1beta2/gapic/transports/video_intelligence_service_grpc_transport.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/videointelligence_v1beta2/gapic/video_intelligence_service_client.py b/google/cloud/videointelligence_v1beta2/gapic/video_intelligence_service_client.py index 1356475f..7b7023ea 100644 --- a/google/cloud/videointelligence_v1beta2/gapic/video_intelligence_service_client.py +++ b/google/cloud/videointelligence_v1beta2/gapic/video_intelligence_service_client.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -241,19 +241,19 @@ def annotate_video( request as ``input_content``. If set, ``input_content`` should be unset. input_content (bytes): The video data bytes. If unset, the input video(s) should be specified via ``input_uri``. If set, ``input_uri`` should be unset. - features (list[~google.cloud.videointelligence_v1beta2.types.Feature]): Requested video annotation features. + features (list[~google.cloud.videointelligence_v1beta2.types.Feature]): Required. Requested video annotation features. 
video_context (Union[dict, ~google.cloud.videointelligence_v1beta2.types.VideoContext]): Additional video context and/or feature-specific parameters. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.videointelligence_v1beta2.types.VideoContext` - output_uri (str): Optional location where the output (in JSON format) should be stored. + output_uri (str): Optional. Location where the output (in JSON format) should be stored. Currently, only `Google Cloud Storage `__ URIs are supported, which must be specified in the following format: ``gs://bucket-id/object-id`` (other URI formats return ``google.rpc.Code.INVALID_ARGUMENT``). For more information, see `Request URIs `__. - location_id (str): Optional cloud region where annotation should take place. Supported + location_id (str): Optional. Cloud region where annotation should take place. Supported cloud regions: ``us-east1``, ``us-west1``, ``europe-west1``, ``asia-east1``. If no region is specified, a region will be determined based on video file location. diff --git a/google/cloud/videointelligence_v1beta2/proto/video_intelligence.proto b/google/cloud/videointelligence_v1beta2/proto/video_intelligence.proto index a69c2579..8e80640e 100644 --- a/google/cloud/videointelligence_v1beta2/proto/video_intelligence.proto +++ b/google/cloud/videointelligence_v1beta2/proto/video_intelligence.proto @@ -1,4 +1,4 @@ -// Copyright 2017 Google Inc. +// Copyright 2019 Google LLC. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,12 +11,15 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. +// syntax = "proto3"; package google.cloud.videointelligence.v1beta2; import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; import "google/longrunning/operations.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/timestamp.proto"; @@ -32,6 +35,10 @@ option ruby_package = "Google::Cloud::VideoIntelligence::V1beta2"; // Service that implements Google Cloud Video Intelligence API. service VideoIntelligenceService { + option (google.api.default_host) = "videointelligence.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/cloud-platform"; + // Performs asynchronous video annotation. Progress and results can be // retrieved through the `google.longrunning.Operations` interface. // `Operation.metadata` contains `AnnotateVideoProgress` (progress). @@ -42,6 +49,11 @@ service VideoIntelligenceService { post: "/v1beta2/videos:annotate" body: "*" }; + option (google.api.method_signature) = "input_uri,features"; + option (google.longrunning.operation_info) = { + response_type: "AnnotateVideoResponse" + metadata_type: "AnnotateVideoProgress" + }; } } @@ -64,31 +76,31 @@ message AnnotateVideoRequest { // If set, `input_uri` should be unset. bytes input_content = 6; - // Requested video annotation features. - repeated Feature features = 2; + // Required. Requested video annotation features. + repeated Feature features = 2 [(google.api.field_behavior) = REQUIRED]; // Additional video context and/or feature-specific parameters. VideoContext video_context = 3; - // Optional location where the output (in JSON format) should be stored. + // Optional. 
Location where the output (in JSON format) should be stored. // Currently, only [Google Cloud Storage](https://cloud.google.com/storage/) // URIs are supported, which must be specified in the following format: // `gs://bucket-id/object-id` (other URI formats return // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For // more information, see [Request URIs](/storage/docs/reference-uris). - string output_uri = 4; + string output_uri = 4 [(google.api.field_behavior) = OPTIONAL]; - // Optional cloud region where annotation should take place. Supported cloud + // Optional. Cloud region where annotation should take place. Supported cloud // regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region // is specified, a region will be determined based on video file location. - string location_id = 5; + string location_id = 5 [(google.api.field_behavior) = OPTIONAL]; } // Video context and/or feature-specific parameters. message VideoContext { // Video segments to annotate. The segments may overlap and are not required - // to be contiguous or span the whole video. If unspecified, each video - // is treated as a single segment. + // to be contiguous or span the whole video. If unspecified, each video is + // treated as a single segment. repeated VideoSegment segments = 1; // Config for LABEL_DETECTION. diff --git a/google/cloud/videointelligence_v1beta2/proto/video_intelligence_pb2.py b/google/cloud/videointelligence_v1beta2/proto/video_intelligence_pb2.py index 871a229e..fbfb283e 100644 --- a/google/cloud/videointelligence_v1beta2/proto/video_intelligence_pb2.py +++ b/google/cloud/videointelligence_v1beta2/proto/video_intelligence_pb2.py @@ -17,6 +17,8 @@ from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 +from google.api import client_pb2 as google_dot_api_dot_client__pb2 +from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 from google.longrunning import ( operations_pb2 as google_dot_longrunning_dot_operations__pb2, ) @@ -33,10 +35,12 @@ "\n*com.google.cloud.videointelligence.v1beta2B\035VideoIntelligenceServiceProtoP\001ZWgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1beta2;videointelligence\252\002&Google.Cloud.VideoIntelligence.V1Beta2\312\002&Google\\Cloud\\VideoIntelligence\\V1beta2\352\002)Google::Cloud::VideoIntelligence::V1beta2" ), serialized_pb=_b( - '\nEgoogle/cloud/videointelligence_v1beta2/proto/video_intelligence.proto\x12&google.cloud.videointelligence.v1beta2\x1a\x1cgoogle/api/annotations.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto"\xf9\x01\n\x14\x41nnotateVideoRequest\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x15\n\rinput_content\x18\x06 \x01(\x0c\x12\x41\n\x08\x66\x65\x61tures\x18\x02 \x03(\x0e\x32/.google.cloud.videointelligence.v1beta2.Feature\x12K\n\rvideo_context\x18\x03 \x01(\x0b\x32\x34.google.cloud.videointelligence.v1beta2.VideoContext\x12\x12\n\noutput_uri\x18\x04 \x01(\t\x12\x13\n\x0blocation_id\x18\x05 \x01(\t"\xec\x03\n\x0cVideoContext\x12\x46\n\x08segments\x18\x01 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1beta2.VideoSegment\x12\\\n\x16label_detection_config\x18\x02 \x01(\x0b\x32<.google.cloud.videointelligence.v1beta2.LabelDetectionConfig\x12g\n\x1cshot_change_detection_config\x18\x03 \x01(\x0b\x32\x41.google.cloud.videointelligence.v1beta2.ShotChangeDetectionConfig\x12q\n!explicit_content_detection_config\x18\x04 
\x01(\x0b\x32\x46.google.cloud.videointelligence.v1beta2.ExplicitContentDetectionConfig\x12Z\n\x15\x66\x61\x63\x65_detection_config\x18\x05 \x01(\x0b\x32;.google.cloud.videointelligence.v1beta2.FaceDetectionConfig"\x9a\x01\n\x14LabelDetectionConfig\x12X\n\x14label_detection_mode\x18\x01 \x01(\x0e\x32:.google.cloud.videointelligence.v1beta2.LabelDetectionMode\x12\x19\n\x11stationary_camera\x18\x02 \x01(\x08\x12\r\n\x05model\x18\x03 \x01(\t"*\n\x19ShotChangeDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"/\n\x1e\x45xplicitContentDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"D\n\x13\x46\x61\x63\x65\x44\x65tectionConfig\x12\r\n\x05model\x18\x01 \x01(\t\x12\x1e\n\x16include_bounding_boxes\x18\x02 \x01(\x08"x\n\x0cVideoSegment\x12\x34\n\x11start_time_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x32\n\x0f\x65nd_time_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"i\n\x0cLabelSegment\x12\x45\n\x07segment\x18\x01 \x01(\x0b\x32\x34.google.cloud.videointelligence.v1beta2.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02"P\n\nLabelFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x12\n\nconfidence\x18\x02 \x01(\x02"G\n\x06\x45ntity\x12\x11\n\tentity_id\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x15\n\rlanguage_code\x18\x03 \x01(\t"\xa8\x02\n\x0fLabelAnnotation\x12>\n\x06\x65ntity\x18\x01 \x01(\x0b\x32..google.cloud.videointelligence.v1beta2.Entity\x12I\n\x11\x63\x61tegory_entities\x18\x02 \x03(\x0b\x32..google.cloud.videointelligence.v1beta2.Entity\x12\x46\n\x08segments\x18\x03 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1beta2.LabelSegment\x12\x42\n\x06\x66rames\x18\x04 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1beta2.LabelFrame"\x9a\x01\n\x14\x45xplicitContentFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12R\n\x16pornography_likelihood\x18\x02 \x01(\x0e\x32\x32.google.cloud.videointelligence.v1beta2.Likelihood"i\n\x19\x45xplicitContentAnnotation\x12L\n\x06\x66rames\x18\x01 \x03(\x0b\x32<.google.cloud.videointelligence.v1beta2.ExplicitContentFrame"Q\n\x15NormalizedBoundingBox\x12\x0c\n\x04left\x18\x01 \x01(\x02\x12\x0b\n\x03top\x18\x02 \x01(\x02\x12\r\n\x05right\x18\x03 \x01(\x02\x12\x0e\n\x06\x62ottom\x18\x04 \x01(\x02"T\n\x0b\x46\x61\x63\x65Segment\x12\x45\n\x07segment\x18\x01 \x01(\x0b\x32\x34.google.cloud.videointelligence.v1beta2.VideoSegment"\x9d\x01\n\tFaceFrame\x12`\n\x19normalized_bounding_boxes\x18\x01 \x03(\x0b\x32=.google.cloud.videointelligence.v1beta2.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"\xad\x01\n\x0e\x46\x61\x63\x65\x41nnotation\x12\x11\n\tthumbnail\x18\x01 \x01(\x0c\x12\x45\n\x08segments\x18\x02 \x03(\x0b\x32\x33.google.cloud.videointelligence.v1beta2.FaceSegment\x12\x41\n\x06\x66rames\x18\x03 \x03(\x0b\x32\x31.google.cloud.videointelligence.v1beta2.FaceFrame"\xdf\x04\n\x16VideoAnnotationResults\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12Z\n\x19segment_label_annotations\x18\x02 \x03(\x0b\x32\x37.google.cloud.videointelligence.v1beta2.LabelAnnotation\x12W\n\x16shot_label_annotations\x18\x03 \x03(\x0b\x32\x37.google.cloud.videointelligence.v1beta2.LabelAnnotation\x12X\n\x17\x66rame_label_annotations\x18\x04 \x03(\x0b\x32\x37.google.cloud.videointelligence.v1beta2.LabelAnnotation\x12P\n\x10\x66\x61\x63\x65_annotations\x18\x05 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1beta2.FaceAnnotation\x12N\n\x10shot_annotations\x18\x06 
\x03(\x0b\x32\x34.google.cloud.videointelligence.v1beta2.VideoSegment\x12^\n\x13\x65xplicit_annotation\x18\x07 \x01(\x0b\x32\x41.google.cloud.videointelligence.v1beta2.ExplicitContentAnnotation\x12!\n\x05\x65rror\x18\t \x01(\x0b\x32\x12.google.rpc.Status"s\n\x15\x41nnotateVideoResponse\x12Z\n\x12\x61nnotation_results\x18\x01 \x03(\x0b\x32>.google.cloud.videointelligence.v1beta2.VideoAnnotationResults"\xa7\x01\n\x17VideoAnnotationProgress\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x18\n\x10progress_percent\x18\x02 \x01(\x05\x12.\n\nstart_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"u\n\x15\x41nnotateVideoProgress\x12\\\n\x13\x61nnotation_progress\x18\x01 \x03(\x0b\x32?.google.cloud.videointelligence.v1beta2.VideoAnnotationProgress*\x86\x01\n\x07\x46\x65\x61ture\x12\x17\n\x13\x46\x45\x41TURE_UNSPECIFIED\x10\x00\x12\x13\n\x0fLABEL_DETECTION\x10\x01\x12\x19\n\x15SHOT_CHANGE_DETECTION\x10\x02\x12\x1e\n\x1a\x45XPLICIT_CONTENT_DETECTION\x10\x03\x12\x12\n\x0e\x46\x41\x43\x45_DETECTION\x10\x04*r\n\x12LabelDetectionMode\x12$\n LABEL_DETECTION_MODE_UNSPECIFIED\x10\x00\x12\r\n\tSHOT_MODE\x10\x01\x12\x0e\n\nFRAME_MODE\x10\x02\x12\x17\n\x13SHOT_AND_FRAME_MODE\x10\x03*t\n\nLikelihood\x12\x1a\n\x16LIKELIHOOD_UNSPECIFIED\x10\x00\x12\x11\n\rVERY_UNLIKELY\x10\x01\x12\x0c\n\x08UNLIKELY\x10\x02\x12\x0c\n\x08POSSIBLE\x10\x03\x12\n\n\x06LIKELY\x10\x04\x12\x0f\n\x0bVERY_LIKELY\x10\x05\x32\xae\x01\n\x18VideoIntelligenceService\x12\x91\x01\n\rAnnotateVideo\x12<.google.cloud.videointelligence.v1beta2.AnnotateVideoRequest\x1a\x1d.google.longrunning.Operation"#\x82\xd3\xe4\x93\x02\x1d"\x18/v1beta2/videos:annotate:\x01*B\xa4\x02\n*com.google.cloud.videointelligence.v1beta2B\x1dVideoIntelligenceServiceProtoP\x01ZWgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1beta2;videointelligence\xaa\x02&Google.Cloud.VideoIntelligence.V1Beta2\xca\x02&Google\\Cloud\\VideoIntelligence\\V1beta2\xea\x02)Google::Cloud::VideoIntelligence::V1beta2b\x06proto3' + '\nEgoogle/cloud/videointelligence_v1beta2/proto/video_intelligence.proto\x12&google.cloud.videointelligence.v1beta2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto"\x88\x02\n\x14\x41nnotateVideoRequest\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x15\n\rinput_content\x18\x06 \x01(\x0c\x12\x46\n\x08\x66\x65\x61tures\x18\x02 \x03(\x0e\x32/.google.cloud.videointelligence.v1beta2.FeatureB\x03\xe0\x41\x02\x12K\n\rvideo_context\x18\x03 \x01(\x0b\x32\x34.google.cloud.videointelligence.v1beta2.VideoContext\x12\x17\n\noutput_uri\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12\x18\n\x0blocation_id\x18\x05 \x01(\tB\x03\xe0\x41\x01"\xec\x03\n\x0cVideoContext\x12\x46\n\x08segments\x18\x01 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1beta2.VideoSegment\x12\\\n\x16label_detection_config\x18\x02 \x01(\x0b\x32<.google.cloud.videointelligence.v1beta2.LabelDetectionConfig\x12g\n\x1cshot_change_detection_config\x18\x03 \x01(\x0b\x32\x41.google.cloud.videointelligence.v1beta2.ShotChangeDetectionConfig\x12q\n!explicit_content_detection_config\x18\x04 \x01(\x0b\x32\x46.google.cloud.videointelligence.v1beta2.ExplicitContentDetectionConfig\x12Z\n\x15\x66\x61\x63\x65_detection_config\x18\x05 
\x01(\x0b\x32;.google.cloud.videointelligence.v1beta2.FaceDetectionConfig"\x9a\x01\n\x14LabelDetectionConfig\x12X\n\x14label_detection_mode\x18\x01 \x01(\x0e\x32:.google.cloud.videointelligence.v1beta2.LabelDetectionMode\x12\x19\n\x11stationary_camera\x18\x02 \x01(\x08\x12\r\n\x05model\x18\x03 \x01(\t"*\n\x19ShotChangeDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"/\n\x1e\x45xplicitContentDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"D\n\x13\x46\x61\x63\x65\x44\x65tectionConfig\x12\r\n\x05model\x18\x01 \x01(\t\x12\x1e\n\x16include_bounding_boxes\x18\x02 \x01(\x08"x\n\x0cVideoSegment\x12\x34\n\x11start_time_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x32\n\x0f\x65nd_time_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"i\n\x0cLabelSegment\x12\x45\n\x07segment\x18\x01 \x01(\x0b\x32\x34.google.cloud.videointelligence.v1beta2.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02"P\n\nLabelFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x12\n\nconfidence\x18\x02 \x01(\x02"G\n\x06\x45ntity\x12\x11\n\tentity_id\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x15\n\rlanguage_code\x18\x03 \x01(\t"\xa8\x02\n\x0fLabelAnnotation\x12>\n\x06\x65ntity\x18\x01 \x01(\x0b\x32..google.cloud.videointelligence.v1beta2.Entity\x12I\n\x11\x63\x61tegory_entities\x18\x02 \x03(\x0b\x32..google.cloud.videointelligence.v1beta2.Entity\x12\x46\n\x08segments\x18\x03 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1beta2.LabelSegment\x12\x42\n\x06\x66rames\x18\x04 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1beta2.LabelFrame"\x9a\x01\n\x14\x45xplicitContentFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12R\n\x16pornography_likelihood\x18\x02 \x01(\x0e\x32\x32.google.cloud.videointelligence.v1beta2.Likelihood"i\n\x19\x45xplicitContentAnnotation\x12L\n\x06\x66rames\x18\x01 \x03(\x0b\x32<.google.cloud.videointelligence.v1beta2.ExplicitContentFrame"Q\n\x15NormalizedBoundingBox\x12\x0c\n\x04left\x18\x01 \x01(\x02\x12\x0b\n\x03top\x18\x02 \x01(\x02\x12\r\n\x05right\x18\x03 \x01(\x02\x12\x0e\n\x06\x62ottom\x18\x04 \x01(\x02"T\n\x0b\x46\x61\x63\x65Segment\x12\x45\n\x07segment\x18\x01 \x01(\x0b\x32\x34.google.cloud.videointelligence.v1beta2.VideoSegment"\x9d\x01\n\tFaceFrame\x12`\n\x19normalized_bounding_boxes\x18\x01 \x03(\x0b\x32=.google.cloud.videointelligence.v1beta2.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"\xad\x01\n\x0e\x46\x61\x63\x65\x41nnotation\x12\x11\n\tthumbnail\x18\x01 \x01(\x0c\x12\x45\n\x08segments\x18\x02 \x03(\x0b\x32\x33.google.cloud.videointelligence.v1beta2.FaceSegment\x12\x41\n\x06\x66rames\x18\x03 \x03(\x0b\x32\x31.google.cloud.videointelligence.v1beta2.FaceFrame"\xdf\x04\n\x16VideoAnnotationResults\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12Z\n\x19segment_label_annotations\x18\x02 \x03(\x0b\x32\x37.google.cloud.videointelligence.v1beta2.LabelAnnotation\x12W\n\x16shot_label_annotations\x18\x03 \x03(\x0b\x32\x37.google.cloud.videointelligence.v1beta2.LabelAnnotation\x12X\n\x17\x66rame_label_annotations\x18\x04 \x03(\x0b\x32\x37.google.cloud.videointelligence.v1beta2.LabelAnnotation\x12P\n\x10\x66\x61\x63\x65_annotations\x18\x05 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1beta2.FaceAnnotation\x12N\n\x10shot_annotations\x18\x06 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1beta2.VideoSegment\x12^\n\x13\x65xplicit_annotation\x18\x07 
\x01(\x0b\x32\x41.google.cloud.videointelligence.v1beta2.ExplicitContentAnnotation\x12!\n\x05\x65rror\x18\t \x01(\x0b\x32\x12.google.rpc.Status"s\n\x15\x41nnotateVideoResponse\x12Z\n\x12\x61nnotation_results\x18\x01 \x03(\x0b\x32>.google.cloud.videointelligence.v1beta2.VideoAnnotationResults"\xa7\x01\n\x17VideoAnnotationProgress\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x18\n\x10progress_percent\x18\x02 \x01(\x05\x12.\n\nstart_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"u\n\x15\x41nnotateVideoProgress\x12\\\n\x13\x61nnotation_progress\x18\x01 \x03(\x0b\x32?.google.cloud.videointelligence.v1beta2.VideoAnnotationProgress*\x86\x01\n\x07\x46\x65\x61ture\x12\x17\n\x13\x46\x45\x41TURE_UNSPECIFIED\x10\x00\x12\x13\n\x0fLABEL_DETECTION\x10\x01\x12\x19\n\x15SHOT_CHANGE_DETECTION\x10\x02\x12\x1e\n\x1a\x45XPLICIT_CONTENT_DETECTION\x10\x03\x12\x12\n\x0e\x46\x41\x43\x45_DETECTION\x10\x04*r\n\x12LabelDetectionMode\x12$\n LABEL_DETECTION_MODE_UNSPECIFIED\x10\x00\x12\r\n\tSHOT_MODE\x10\x01\x12\x0e\n\nFRAME_MODE\x10\x02\x12\x17\n\x13SHOT_AND_FRAME_MODE\x10\x03*t\n\nLikelihood\x12\x1a\n\x16LIKELIHOOD_UNSPECIFIED\x10\x00\x12\x11\n\rVERY_UNLIKELY\x10\x01\x12\x0c\n\x08UNLIKELY\x10\x02\x12\x0c\n\x08POSSIBLE\x10\x03\x12\n\n\x06LIKELY\x10\x04\x12\x0f\n\x0bVERY_LIKELY\x10\x05\x32\xca\x02\n\x18VideoIntelligenceService\x12\xd7\x01\n\rAnnotateVideo\x12<.google.cloud.videointelligence.v1beta2.AnnotateVideoRequest\x1a\x1d.google.longrunning.Operation"i\x82\xd3\xe4\x93\x02\x1d"\x18/v1beta2/videos:annotate:\x01*\xda\x41\x12input_uri,features\xca\x41.\n\x15\x41nnotateVideoResponse\x12\x15\x41nnotateVideoProgress\x1aT\xca\x41 videointelligence.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\xa4\x02\n*com.google.cloud.videointelligence.v1beta2B\x1dVideoIntelligenceServiceProtoP\x01ZWgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1beta2;videointelligence\xaa\x02&Google.Cloud.VideoIntelligence.V1Beta2\xca\x02&Google\\Cloud\\VideoIntelligence\\V1beta2\xea\x02)Google::Cloud::VideoIntelligence::V1beta2b\x06proto3' ), dependencies=[ google_dot_api_dot_annotations__pb2.DESCRIPTOR, + google_dot_api_dot_client__pb2.DESCRIPTOR, + google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, google_dot_longrunning_dot_operations__pb2.DESCRIPTOR, google_dot_protobuf_dot_duration__pb2.DESCRIPTOR, google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, @@ -84,8 +88,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=3806, - serialized_end=3940, + serialized_start=3879, + serialized_end=4013, ) _sym_db.RegisterEnumDescriptor(_FEATURE) @@ -119,8 +123,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=3942, - serialized_end=4056, + serialized_start=4015, + serialized_end=4129, ) _sym_db.RegisterEnumDescriptor(_LABELDETECTIONMODE) @@ -156,8 +160,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=4058, - serialized_end=4174, + serialized_start=4131, + serialized_end=4247, ) _sym_db.RegisterEnumDescriptor(_LIKELIHOOD) @@ -237,7 +241,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\002"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -273,7 +277,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -291,7 +295,7 @@ containing_type=None, is_extension=False, 
extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), ], @@ -303,8 +307,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=271, - serialized_end=520, + serialized_start=329, + serialized_end=593, ) @@ -414,8 +418,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=523, - serialized_end=1015, + serialized_start=596, + serialized_end=1088, ) @@ -489,8 +493,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1018, - serialized_end=1172, + serialized_start=1091, + serialized_end=1245, ) @@ -528,8 +532,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1174, - serialized_end=1216, + serialized_start=1247, + serialized_end=1289, ) @@ -567,8 +571,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1218, - serialized_end=1265, + serialized_start=1291, + serialized_end=1338, ) @@ -624,8 +628,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1267, - serialized_end=1335, + serialized_start=1340, + serialized_end=1408, ) @@ -681,8 +685,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1337, - serialized_end=1457, + serialized_start=1410, + serialized_end=1530, ) @@ -738,8 +742,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1459, - serialized_end=1564, + serialized_start=1532, + serialized_end=1637, ) @@ -795,8 +799,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1566, - serialized_end=1646, + serialized_start=1639, + serialized_end=1719, ) @@ -870,8 +874,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1648, - serialized_end=1719, + serialized_start=1721, + serialized_end=1792, ) @@ -963,8 +967,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1722, - serialized_end=2018, + serialized_start=1795, + serialized_end=2091, ) @@ -1020,8 +1024,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2021, - serialized_end=2175, + serialized_start=2094, + serialized_end=2248, ) @@ -1059,8 +1063,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2177, - serialized_end=2282, + serialized_start=2250, + serialized_end=2355, ) @@ -1152,8 +1156,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2284, - serialized_end=2365, + serialized_start=2357, + serialized_end=2438, ) @@ -1191,8 +1195,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2367, - serialized_end=2451, + serialized_start=2440, + serialized_end=2524, ) @@ -1248,8 +1252,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2454, - serialized_end=2611, + serialized_start=2527, + serialized_end=2684, ) @@ -1323,8 +1327,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2614, - serialized_end=2787, + serialized_start=2687, + serialized_end=2860, ) @@ -1488,8 +1492,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2790, - serialized_end=3397, + serialized_start=2863, + serialized_end=3470, ) @@ -1527,8 +1531,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3399, - serialized_end=3514, + serialized_start=3472, + serialized_end=3587, ) @@ -1620,8 +1624,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3517, - serialized_end=3684, + serialized_start=3590, + serialized_end=3757, ) @@ -1659,8 +1663,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - 
serialized_start=3686, - serialized_end=3803, + serialized_start=3759, + serialized_end=3876, ) _ANNOTATEVIDEOREQUEST.fields_by_name["features"].enum_type = _FEATURE @@ -1800,11 +1804,11 @@ specified via ``input_uri``. If set, ``input_uri`` should be unset. features: - Requested video annotation features. + Required. Requested video annotation features. video_context: Additional video context and/or feature-specific parameters. output_uri: - Optional location where the output (in JSON format) should be + Optional. Location where the output (in JSON format) should be stored. Currently, only `Google Cloud Storage `__ URIs are supported, which must be specified in the following format: @@ -1813,7 +1817,7 @@ ]). For more information, see `Request URIs `__. location_id: - Optional cloud region where annotation should take place. + Optional. Cloud region where annotation should take place. Supported cloud regions: ``us-east1``, ``us-west1``, ``europe- west1``, ``asia-east1``. If no region is specified, a region will be determined based on video file location. @@ -1967,7 +1971,8 @@ dict( DESCRIPTOR=_LABELSEGMENT, __module__="google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", - __doc__="""Video segment level annotation results for label detection. + __doc__="""Video segment level annotation results for label + detection. Attributes: @@ -2080,9 +2085,9 @@ dict( DESCRIPTOR=_EXPLICITCONTENTANNOTATION, __module__="google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", - __doc__="""Explicit content annotation (based on per-frame visual signals only). If - no explicit content has been detected in a frame, no annotations are - present for that frame. + __doc__="""Explicit content annotation (based on per-frame visual + signals only). If no explicit content has been detected in a frame, no + annotations are present for that frame. Attributes: @@ -2100,8 +2105,8 @@ dict( DESCRIPTOR=_NORMALIZEDBOUNDINGBOX, __module__="google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", - __doc__="""Normalized bounding box. The normalized vertex coordinates are relative - to the original image. Range: [0, 1]. + __doc__="""Normalized bounding box. The normalized vertex coordinates + are relative to the original image. Range: [0, 1]. Attributes: @@ -2227,8 +2232,8 @@ dict( DESCRIPTOR=_ANNOTATEVIDEORESPONSE, __module__="google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", - __doc__="""Video annotation response. Included in the ``response`` field of the - ``Operation`` returned by the ``GetOperation`` call of the + __doc__="""Video annotation response. Included in the ``response`` + field of the ``Operation`` returned by the ``GetOperation`` call of the ``google::longrunning::Operations`` service. @@ -2274,8 +2279,8 @@ dict( DESCRIPTOR=_ANNOTATEVIDEOPROGRESS, __module__="google.cloud.videointelligence_v1beta2.proto.video_intelligence_pb2", - __doc__="""Video annotation progress. Included in the ``metadata`` field of the - ``Operation`` returned by the ``GetOperation`` call of the + __doc__="""Video annotation progress. Included in the ``metadata`` + field of the ``Operation`` returned by the ``GetOperation`` call of the ``google::longrunning::Operations`` service. 
@@ -2291,15 +2296,20 @@ DESCRIPTOR._options = None +_ANNOTATEVIDEOREQUEST.fields_by_name["features"]._options = None +_ANNOTATEVIDEOREQUEST.fields_by_name["output_uri"]._options = None +_ANNOTATEVIDEOREQUEST.fields_by_name["location_id"]._options = None _VIDEOINTELLIGENCESERVICE = _descriptor.ServiceDescriptor( name="VideoIntelligenceService", full_name="google.cloud.videointelligence.v1beta2.VideoIntelligenceService", file=DESCRIPTOR, index=0, - serialized_options=None, - serialized_start=4177, - serialized_end=4351, + serialized_options=_b( + "\312A videointelligence.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform" + ), + serialized_start=4250, + serialized_end=4580, methods=[ _descriptor.MethodDescriptor( name="AnnotateVideo", @@ -2309,7 +2319,7 @@ input_type=_ANNOTATEVIDEOREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, serialized_options=_b( - '\202\323\344\223\002\035"\030/v1beta2/videos:annotate:\001*' + '\202\323\344\223\002\035"\030/v1beta2/videos:annotate:\001*\332A\022input_uri,features\312A.\n\025AnnotateVideoResponse\022\025AnnotateVideoProgress' ), ) ], diff --git a/google/cloud/videointelligence_v1beta2/types.py b/google/cloud/videointelligence_v1beta2/types.py index 88b29b89..91a59c15 100644 --- a/google/cloud/videointelligence_v1beta2/types.py +++ b/google/cloud/videointelligence_v1beta2/types.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/videointelligence_v1p1beta1/__init__.py b/google/cloud/videointelligence_v1p1beta1/__init__.py index f34c79eb..cfb54486 100644 --- a/google/cloud/videointelligence_v1p1beta1/__init__.py +++ b/google/cloud/videointelligence_v1p1beta1/__init__.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -16,6 +16,8 @@ from __future__ import absolute_import +import sys +import warnings from google.cloud.videointelligence_v1p1beta1 import types from google.cloud.videointelligence_v1p1beta1.gapic import enums @@ -24,6 +26,15 @@ ) +if sys.version_info[:2] == (2, 7): + message = ( + "A future version of this library will drop support for Python 2.7." + "More details about Python 2 support for Google Cloud Client Libraries" + "can be found at https://cloud.google.com/python/docs/python2-sunset/" + ) + warnings.warn(message, DeprecationWarning) + + class VideoIntelligenceServiceClient( video_intelligence_service_client.VideoIntelligenceServiceClient ): diff --git a/google/cloud/videointelligence_v1p1beta1/gapic/enums.py b/google/cloud/videointelligence_v1p1beta1/gapic/enums.py index 534d822b..337df8ee 100644 --- a/google/cloud/videointelligence_v1p1beta1/gapic/enums.py +++ b/google/cloud/videointelligence_v1p1beta1/gapic/enums.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/videointelligence_v1p1beta1/gapic/transports/video_intelligence_service_grpc_transport.py b/google/cloud/videointelligence_v1p1beta1/gapic/transports/video_intelligence_service_grpc_transport.py index 00d033d6..07f7988c 100644 --- a/google/cloud/videointelligence_v1p1beta1/gapic/transports/video_intelligence_service_grpc_transport.py +++ b/google/cloud/videointelligence_v1p1beta1/gapic/transports/video_intelligence_service_grpc_transport.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/videointelligence_v1p1beta1/gapic/video_intelligence_service_client.py b/google/cloud/videointelligence_v1p1beta1/gapic/video_intelligence_service_client.py index 0e6276d6..51d50777 100644 --- a/google/cloud/videointelligence_v1p1beta1/gapic/video_intelligence_service_client.py +++ b/google/cloud/videointelligence_v1p1beta1/gapic/video_intelligence_service_client.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -243,19 +243,19 @@ def annotate_video( request as ``input_content``. If set, ``input_content`` should be unset. input_content (bytes): The video data bytes. If unset, the input video(s) should be specified via ``input_uri``. If set, ``input_uri`` should be unset. - features (list[~google.cloud.videointelligence_v1p1beta1.types.Feature]): Requested video annotation features. + features (list[~google.cloud.videointelligence_v1p1beta1.types.Feature]): Required. Requested video annotation features. video_context (Union[dict, ~google.cloud.videointelligence_v1p1beta1.types.VideoContext]): Additional video context and/or feature-specific parameters. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.videointelligence_v1p1beta1.types.VideoContext` - output_uri (str): Optional location where the output (in JSON format) should be stored. + output_uri (str): Optional. Location where the output (in JSON format) should be stored. Currently, only `Google Cloud Storage `__ URIs are supported, which must be specified in the following format: ``gs://bucket-id/object-id`` (other URI formats return ``google.rpc.Code.INVALID_ARGUMENT``). For more information, see `Request URIs `__. - location_id (str): Optional cloud region where annotation should take place. Supported + location_id (str): Optional. Cloud region where annotation should take place. Supported cloud regions: ``us-east1``, ``us-west1``, ``europe-west1``, ``asia-east1``. If no region is specified, a region will be determined based on video file location. diff --git a/google/cloud/videointelligence_v1p1beta1/proto/video_intelligence.proto b/google/cloud/videointelligence_v1p1beta1/proto/video_intelligence.proto index 115f362b..44d3ca64 100644 --- a/google/cloud/videointelligence_v1p1beta1/proto/video_intelligence.proto +++ b/google/cloud/videointelligence_v1p1beta1/proto/video_intelligence.proto @@ -1,4 +1,4 @@ -// Copyright 2018 Google Inc. +// Copyright 2019 Google LLC. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -11,12 +11,15 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. +// syntax = "proto3"; package google.cloud.videointelligence.v1p1beta1; import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; import "google/longrunning/operations.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/timestamp.proto"; @@ -32,16 +35,23 @@ option ruby_package = "Google::Cloud::VideoIntelligence::V1p1beta1"; // Service that implements Google Cloud Video Intelligence API. service VideoIntelligenceService { + option (google.api.default_host) = "videointelligence.googleapis.com"; + option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform"; + // Performs asynchronous video annotation. Progress and results can be // retrieved through the `google.longrunning.Operations` interface. // `Operation.metadata` contains `AnnotateVideoProgress` (progress). // `Operation.response` contains `AnnotateVideoResponse` (results). - rpc AnnotateVideo(AnnotateVideoRequest) - returns (google.longrunning.Operation) { + rpc AnnotateVideo(AnnotateVideoRequest) returns (google.longrunning.Operation) { option (google.api.http) = { post: "/v1p1beta1/videos:annotate" body: "*" }; + option (google.api.method_signature) = "input_uri,features"; + option (google.longrunning.operation_info) = { + response_type: "AnnotateVideoResponse" + metadata_type: "AnnotateVideoProgress" + }; } } @@ -51,10 +61,10 @@ message AnnotateVideoRequest { // [Google Cloud Storage](https://cloud.google.com/storage/) URIs are // supported, which must be specified in the following format: // `gs://bucket-id/object-id` (other URI formats return - // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For - // more information, see [Request URIs](/storage/docs/reference-uris). A video - // URI may include wildcards in `object-id`, and thus identify multiple - // videos. Supported wildcards: '*' to match 0 or more characters; + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see + // [Request URIs](/storage/docs/reference-uris). + // A video URI may include wildcards in `object-id`, and thus identify + // multiple videos. Supported wildcards: '*' to match 0 or more characters; // '?' to match 1 character. If unset, the input video should be embedded // in the request as `input_content`. If set, `input_content` should be unset. string input_uri = 1; @@ -64,24 +74,24 @@ message AnnotateVideoRequest { // If set, `input_uri` should be unset. bytes input_content = 6; - // Requested video annotation features. - repeated Feature features = 2; + // Required. Requested video annotation features. + repeated Feature features = 2 [(google.api.field_behavior) = REQUIRED]; // Additional video context and/or feature-specific parameters. VideoContext video_context = 3; - // Optional location where the output (in JSON format) should be stored. + // Optional. Location where the output (in JSON format) should be stored. // Currently, only [Google Cloud Storage](https://cloud.google.com/storage/) // URIs are supported, which must be specified in the following format: // `gs://bucket-id/object-id` (other URI formats return - // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For - // more information, see [Request URIs](/storage/docs/reference-uris). 
- string output_uri = 4; + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see + // [Request URIs](/storage/docs/reference-uris). + string output_uri = 4 [(google.api.field_behavior) = OPTIONAL]; - // Optional cloud region where annotation should take place. Supported cloud + // Optional. Cloud region where annotation should take place. Supported cloud // regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region // is specified, a region will be determined based on video file location. - string location_id = 5; + string location_id = 5 [(google.api.field_behavior) = OPTIONAL]; } // Video context and/or feature-specific parameters. @@ -285,60 +295,60 @@ message AnnotateVideoProgress { // Config for SPEECH_TRANSCRIPTION. message SpeechTranscriptionConfig { - // *Required* The language of the supplied audio as a + // Required. *Required* The language of the supplied audio as a // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. // Example: "en-US". // See [Language Support](https://cloud.google.com/speech/docs/languages) // for a list of the currently supported language codes. - string language_code = 1; + string language_code = 1 [(google.api.field_behavior) = REQUIRED]; - // *Optional* Maximum number of recognition hypotheses to be returned. + // Optional. Maximum number of recognition hypotheses to be returned. // Specifically, the maximum number of `SpeechRecognitionAlternative` messages - // within each `SpeechRecognitionResult`. The server may return fewer than + // within each `SpeechTranscription`. The server may return fewer than // `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will // return a maximum of one. If omitted, will return a maximum of one. - int32 max_alternatives = 2; + int32 max_alternatives = 2 [(google.api.field_behavior) = OPTIONAL]; - // *Optional* If set to `true`, the server will attempt to filter out + // Optional. If set to `true`, the server will attempt to filter out // profanities, replacing all but the initial character in each filtered word // with asterisks, e.g. "f***". If set to `false` or omitted, profanities // won't be filtered out. - bool filter_profanity = 3; + bool filter_profanity = 3 [(google.api.field_behavior) = OPTIONAL]; - // *Optional* A means to provide context to assist the speech recognition. - repeated SpeechContext speech_contexts = 4; + // Optional. A means to provide context to assist the speech recognition. + repeated SpeechContext speech_contexts = 4 [(google.api.field_behavior) = OPTIONAL]; - // *Optional* If 'true', adds punctuation to recognition result hypotheses. + // Optional. If 'true', adds punctuation to recognition result hypotheses. // This feature is only available in select languages. Setting this for // requests in other languages has no effect at all. The default 'false' value // does not add punctuation to result hypotheses. NOTE: "This is currently // offered as an experimental service, complimentary to all users. In the // future this may be exclusively available as a premium feature." - bool enable_automatic_punctuation = 5; + bool enable_automatic_punctuation = 5 [(google.api.field_behavior) = OPTIONAL]; - // *Optional* For file formats, such as MXF or MKV, supporting multiple audio + // Optional. For file formats, such as MXF or MKV, supporting multiple audio // tracks, specify up to two tracks. Default: track 0. 
- repeated int32 audio_tracks = 6; + repeated int32 audio_tracks = 6 [(google.api.field_behavior) = OPTIONAL]; } // Provides "hints" to the speech recognizer to favor specific words and phrases // in the results. message SpeechContext { - // *Optional* A list of strings containing words and phrases "hints" so that + // Optional. A list of strings containing words and phrases "hints" so that // the speech recognition is more likely to recognize them. This can be used // to improve the accuracy for specific words and phrases, for example, if // specific commands are typically spoken by the user. This can also be used // to add additional words to the vocabulary of the recognizer. See // [usage limits](https://cloud.google.com/speech/limits#content). - repeated string phrases = 1; + repeated string phrases = 1 [(google.api.field_behavior) = OPTIONAL]; } // A speech recognition result corresponding to a portion of the audio. message SpeechTranscription { - // Output only. May contain one or more recognition hypotheses (up to the - // maximum specified in `max_alternatives`). - // These alternatives are ordered in terms of accuracy, with the top (first) - // alternative being the most probable, as ranked by the recognizer. + // May contain one or more recognition hypotheses (up to the maximum specified + // in `max_alternatives`). These alternatives are ordered in terms of + // accuracy, with the top (first) alternative being the most probable, as + // ranked by the recognizer. repeated SpeechRecognitionAlternative alternatives = 1; } @@ -349,11 +359,11 @@ message SpeechRecognitionAlternative { // Output only. The confidence estimate between 0.0 and 1.0. A higher number // indicates an estimated greater likelihood that the recognized words are - // correct. This field is typically provided only for the top hypothesis, and - // only for `is_final=true` results. Clients should not rely on the - // `confidence` field as it is not guaranteed to be accurate or consistent. + // correct. This field is set only for the top alternative. + // This field is not guaranteed to be accurate and users should not rely on it + // to be always provided. // The default of 0.0 is a sentinel value indicating `confidence` was not set. - float confidence = 2; + float confidence = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. A list of word-specific information for each recognized word. 
repeated WordInfo words = 3; diff --git a/google/cloud/videointelligence_v1p1beta1/proto/video_intelligence_pb2.py b/google/cloud/videointelligence_v1p1beta1/proto/video_intelligence_pb2.py index 52b38006..650598ba 100644 --- a/google/cloud/videointelligence_v1p1beta1/proto/video_intelligence_pb2.py +++ b/google/cloud/videointelligence_v1p1beta1/proto/video_intelligence_pb2.py @@ -17,6 +17,8 @@ from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 +from google.api import client_pb2 as google_dot_api_dot_client__pb2 +from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 from google.longrunning import ( operations_pb2 as google_dot_longrunning_dot_operations__pb2, ) @@ -33,10 +35,12 @@ "\n,com.google.cloud.videointelligence.v1p1beta1B\035VideoIntelligenceServiceProtoP\001ZYgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1p1beta1;videointelligence\252\002(Google.Cloud.VideoIntelligence.V1P1Beta1\312\002(Google\\Cloud\\VideoIntelligence\\V1p1beta1\352\002+Google::Cloud::VideoIntelligence::V1p1beta1" ), serialized_pb=_b( - '\nGgoogle/cloud/videointelligence_v1p1beta1/proto/video_intelligence.proto\x12(google.cloud.videointelligence.v1p1beta1\x1a\x1cgoogle/api/annotations.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto"\xfd\x01\n\x14\x41nnotateVideoRequest\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x15\n\rinput_content\x18\x06 \x01(\x0c\x12\x43\n\x08\x66\x65\x61tures\x18\x02 \x03(\x0e\x32\x31.google.cloud.videointelligence.v1p1beta1.Feature\x12M\n\rvideo_context\x18\x03 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p1beta1.VideoContext\x12\x12\n\noutput_uri\x18\x04 \x01(\t\x12\x13\n\x0blocation_id\x18\x05 \x01(\t"\x82\x04\n\x0cVideoContext\x12H\n\x08segments\x18\x01 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p1beta1.VideoSegment\x12^\n\x16label_detection_config\x18\x02 \x01(\x0b\x32>.google.cloud.videointelligence.v1p1beta1.LabelDetectionConfig\x12i\n\x1cshot_change_detection_config\x18\x03 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p1beta1.ShotChangeDetectionConfig\x12s\n!explicit_content_detection_config\x18\x04 \x01(\x0b\x32H.google.cloud.videointelligence.v1p1beta1.ExplicitContentDetectionConfig\x12h\n\x1bspeech_transcription_config\x18\x06 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p1beta1.SpeechTranscriptionConfig"\x9c\x01\n\x14LabelDetectionConfig\x12Z\n\x14label_detection_mode\x18\x01 \x01(\x0e\x32<.google.cloud.videointelligence.v1p1beta1.LabelDetectionMode\x12\x19\n\x11stationary_camera\x18\x02 \x01(\x08\x12\r\n\x05model\x18\x03 \x01(\t"*\n\x19ShotChangeDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"/\n\x1e\x45xplicitContentDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"x\n\x0cVideoSegment\x12\x34\n\x11start_time_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x32\n\x0f\x65nd_time_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"k\n\x0cLabelSegment\x12G\n\x07segment\x18\x01 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p1beta1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02"P\n\nLabelFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x12\n\nconfidence\x18\x02 \x01(\x02"G\n\x06\x45ntity\x12\x11\n\tentity_id\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x15\n\rlanguage_code\x18\x03 \x01(\t"\xb0\x02\n\x0fLabelAnnotation\x12@\n\x06\x65ntity\x18\x01 
\x01(\x0b\x32\x30.google.cloud.videointelligence.v1p1beta1.Entity\x12K\n\x11\x63\x61tegory_entities\x18\x02 \x03(\x0b\x32\x30.google.cloud.videointelligence.v1p1beta1.Entity\x12H\n\x08segments\x18\x03 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p1beta1.LabelSegment\x12\x44\n\x06\x66rames\x18\x04 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1p1beta1.LabelFrame"\x9c\x01\n\x14\x45xplicitContentFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12T\n\x16pornography_likelihood\x18\x02 \x01(\x0e\x32\x34.google.cloud.videointelligence.v1p1beta1.Likelihood"k\n\x19\x45xplicitContentAnnotation\x12N\n\x06\x66rames\x18\x01 \x03(\x0b\x32>.google.cloud.videointelligence.v1p1beta1.ExplicitContentFrame"\xf5\x04\n\x16VideoAnnotationResults\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\\\n\x19segment_label_annotations\x18\x02 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p1beta1.LabelAnnotation\x12Y\n\x16shot_label_annotations\x18\x03 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p1beta1.LabelAnnotation\x12Z\n\x17\x66rame_label_annotations\x18\x04 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p1beta1.LabelAnnotation\x12P\n\x10shot_annotations\x18\x06 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p1beta1.VideoSegment\x12`\n\x13\x65xplicit_annotation\x18\x07 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p1beta1.ExplicitContentAnnotation\x12\\\n\x15speech_transcriptions\x18\x0b \x03(\x0b\x32=.google.cloud.videointelligence.v1p1beta1.SpeechTranscription\x12!\n\x05\x65rror\x18\t \x01(\x0b\x32\x12.google.rpc.Status"u\n\x15\x41nnotateVideoResponse\x12\\\n\x12\x61nnotation_results\x18\x01 \x03(\x0b\x32@.google.cloud.videointelligence.v1p1beta1.VideoAnnotationResults"\xa7\x01\n\x17VideoAnnotationProgress\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x18\n\x10progress_percent\x18\x02 \x01(\x05\x12.\n\nstart_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"w\n\x15\x41nnotateVideoProgress\x12^\n\x13\x61nnotation_progress\x18\x01 \x03(\x0b\x32\x41.google.cloud.videointelligence.v1p1beta1.VideoAnnotationProgress"\xf4\x01\n\x19SpeechTranscriptionConfig\x12\x15\n\rlanguage_code\x18\x01 \x01(\t\x12\x18\n\x10max_alternatives\x18\x02 \x01(\x05\x12\x18\n\x10\x66ilter_profanity\x18\x03 \x01(\x08\x12P\n\x0fspeech_contexts\x18\x04 \x03(\x0b\x32\x37.google.cloud.videointelligence.v1p1beta1.SpeechContext\x12$\n\x1c\x65nable_automatic_punctuation\x18\x05 \x01(\x08\x12\x14\n\x0c\x61udio_tracks\x18\x06 \x03(\x05" \n\rSpeechContext\x12\x0f\n\x07phrases\x18\x01 \x03(\t"s\n\x13SpeechTranscription\x12\\\n\x0c\x61lternatives\x18\x01 \x03(\x0b\x32\x46.google.cloud.videointelligence.v1p1beta1.SpeechRecognitionAlternative"\x89\x01\n\x1cSpeechRecognitionAlternative\x12\x12\n\ntranscript\x18\x01 \x01(\t\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12\x41\n\x05words\x18\x03 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1p1beta1.WordInfo"t\n\x08WordInfo\x12-\n\nstart_time\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12+\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x0c\n\x04word\x18\x03 \x01(\t*\x8c\x01\n\x07\x46\x65\x61ture\x12\x17\n\x13\x46\x45\x41TURE_UNSPECIFIED\x10\x00\x12\x13\n\x0fLABEL_DETECTION\x10\x01\x12\x19\n\x15SHOT_CHANGE_DETECTION\x10\x02\x12\x1e\n\x1a\x45XPLICIT_CONTENT_DETECTION\x10\x03\x12\x18\n\x14SPEECH_TRANSCRIPTION\x10\x06*r\n\x12LabelDetectionMode\x12$\n 
LABEL_DETECTION_MODE_UNSPECIFIED\x10\x00\x12\r\n\tSHOT_MODE\x10\x01\x12\x0e\n\nFRAME_MODE\x10\x02\x12\x17\n\x13SHOT_AND_FRAME_MODE\x10\x03*t\n\nLikelihood\x12\x1a\n\x16LIKELIHOOD_UNSPECIFIED\x10\x00\x12\x11\n\rVERY_UNLIKELY\x10\x01\x12\x0c\n\x08UNLIKELY\x10\x02\x12\x0c\n\x08POSSIBLE\x10\x03\x12\n\n\x06LIKELY\x10\x04\x12\x0f\n\x0bVERY_LIKELY\x10\x05\x32\xb2\x01\n\x18VideoIntelligenceService\x12\x95\x01\n\rAnnotateVideo\x12>.google.cloud.videointelligence.v1p1beta1.AnnotateVideoRequest\x1a\x1d.google.longrunning.Operation"%\x82\xd3\xe4\x93\x02\x1f"\x1a/v1p1beta1/videos:annotate:\x01*B\xae\x02\n,com.google.cloud.videointelligence.v1p1beta1B\x1dVideoIntelligenceServiceProtoP\x01ZYgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1p1beta1;videointelligence\xaa\x02(Google.Cloud.VideoIntelligence.V1P1Beta1\xca\x02(Google\\Cloud\\VideoIntelligence\\V1p1beta1\xea\x02+Google::Cloud::VideoIntelligence::V1p1beta1b\x06proto3' + '\nGgoogle/cloud/videointelligence_v1p1beta1/proto/video_intelligence.proto\x12(google.cloud.videointelligence.v1p1beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto"\x8c\x02\n\x14\x41nnotateVideoRequest\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x15\n\rinput_content\x18\x06 \x01(\x0c\x12H\n\x08\x66\x65\x61tures\x18\x02 \x03(\x0e\x32\x31.google.cloud.videointelligence.v1p1beta1.FeatureB\x03\xe0\x41\x02\x12M\n\rvideo_context\x18\x03 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p1beta1.VideoContext\x12\x17\n\noutput_uri\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12\x18\n\x0blocation_id\x18\x05 \x01(\tB\x03\xe0\x41\x01"\x82\x04\n\x0cVideoContext\x12H\n\x08segments\x18\x01 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p1beta1.VideoSegment\x12^\n\x16label_detection_config\x18\x02 \x01(\x0b\x32>.google.cloud.videointelligence.v1p1beta1.LabelDetectionConfig\x12i\n\x1cshot_change_detection_config\x18\x03 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p1beta1.ShotChangeDetectionConfig\x12s\n!explicit_content_detection_config\x18\x04 \x01(\x0b\x32H.google.cloud.videointelligence.v1p1beta1.ExplicitContentDetectionConfig\x12h\n\x1bspeech_transcription_config\x18\x06 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p1beta1.SpeechTranscriptionConfig"\x9c\x01\n\x14LabelDetectionConfig\x12Z\n\x14label_detection_mode\x18\x01 \x01(\x0e\x32<.google.cloud.videointelligence.v1p1beta1.LabelDetectionMode\x12\x19\n\x11stationary_camera\x18\x02 \x01(\x08\x12\r\n\x05model\x18\x03 \x01(\t"*\n\x19ShotChangeDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"/\n\x1e\x45xplicitContentDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"x\n\x0cVideoSegment\x12\x34\n\x11start_time_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x32\n\x0f\x65nd_time_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"k\n\x0cLabelSegment\x12G\n\x07segment\x18\x01 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p1beta1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02"P\n\nLabelFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x12\n\nconfidence\x18\x02 \x01(\x02"G\n\x06\x45ntity\x12\x11\n\tentity_id\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x15\n\rlanguage_code\x18\x03 \x01(\t"\xb0\x02\n\x0fLabelAnnotation\x12@\n\x06\x65ntity\x18\x01 
\x01(\x0b\x32\x30.google.cloud.videointelligence.v1p1beta1.Entity\x12K\n\x11\x63\x61tegory_entities\x18\x02 \x03(\x0b\x32\x30.google.cloud.videointelligence.v1p1beta1.Entity\x12H\n\x08segments\x18\x03 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p1beta1.LabelSegment\x12\x44\n\x06\x66rames\x18\x04 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1p1beta1.LabelFrame"\x9c\x01\n\x14\x45xplicitContentFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12T\n\x16pornography_likelihood\x18\x02 \x01(\x0e\x32\x34.google.cloud.videointelligence.v1p1beta1.Likelihood"k\n\x19\x45xplicitContentAnnotation\x12N\n\x06\x66rames\x18\x01 \x03(\x0b\x32>.google.cloud.videointelligence.v1p1beta1.ExplicitContentFrame"\xf5\x04\n\x16VideoAnnotationResults\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\\\n\x19segment_label_annotations\x18\x02 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p1beta1.LabelAnnotation\x12Y\n\x16shot_label_annotations\x18\x03 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p1beta1.LabelAnnotation\x12Z\n\x17\x66rame_label_annotations\x18\x04 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p1beta1.LabelAnnotation\x12P\n\x10shot_annotations\x18\x06 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p1beta1.VideoSegment\x12`\n\x13\x65xplicit_annotation\x18\x07 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p1beta1.ExplicitContentAnnotation\x12\\\n\x15speech_transcriptions\x18\x0b \x03(\x0b\x32=.google.cloud.videointelligence.v1p1beta1.SpeechTranscription\x12!\n\x05\x65rror\x18\t \x01(\x0b\x32\x12.google.rpc.Status"u\n\x15\x41nnotateVideoResponse\x12\\\n\x12\x61nnotation_results\x18\x01 \x03(\x0b\x32@.google.cloud.videointelligence.v1p1beta1.VideoAnnotationResults"\xa7\x01\n\x17VideoAnnotationProgress\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x18\n\x10progress_percent\x18\x02 \x01(\x05\x12.\n\nstart_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"w\n\x15\x41nnotateVideoProgress\x12^\n\x13\x61nnotation_progress\x18\x01 \x03(\x0b\x32\x41.google.cloud.videointelligence.v1p1beta1.VideoAnnotationProgress"\x92\x02\n\x19SpeechTranscriptionConfig\x12\x1a\n\rlanguage_code\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x1d\n\x10max_alternatives\x18\x02 \x01(\x05\x42\x03\xe0\x41\x01\x12\x1d\n\x10\x66ilter_profanity\x18\x03 \x01(\x08\x42\x03\xe0\x41\x01\x12U\n\x0fspeech_contexts\x18\x04 \x03(\x0b\x32\x37.google.cloud.videointelligence.v1p1beta1.SpeechContextB\x03\xe0\x41\x01\x12)\n\x1c\x65nable_automatic_punctuation\x18\x05 \x01(\x08\x42\x03\xe0\x41\x01\x12\x19\n\x0c\x61udio_tracks\x18\x06 \x03(\x05\x42\x03\xe0\x41\x01"%\n\rSpeechContext\x12\x14\n\x07phrases\x18\x01 \x03(\tB\x03\xe0\x41\x01"s\n\x13SpeechTranscription\x12\\\n\x0c\x61lternatives\x18\x01 \x03(\x0b\x32\x46.google.cloud.videointelligence.v1p1beta1.SpeechRecognitionAlternative"\x8e\x01\n\x1cSpeechRecognitionAlternative\x12\x12\n\ntranscript\x18\x01 \x01(\t\x12\x17\n\nconfidence\x18\x02 \x01(\x02\x42\x03\xe0\x41\x03\x12\x41\n\x05words\x18\x03 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1p1beta1.WordInfo"t\n\x08WordInfo\x12-\n\nstart_time\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12+\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x0c\n\x04word\x18\x03 
\x01(\t*\x8c\x01\n\x07\x46\x65\x61ture\x12\x17\n\x13\x46\x45\x41TURE_UNSPECIFIED\x10\x00\x12\x13\n\x0fLABEL_DETECTION\x10\x01\x12\x19\n\x15SHOT_CHANGE_DETECTION\x10\x02\x12\x1e\n\x1a\x45XPLICIT_CONTENT_DETECTION\x10\x03\x12\x18\n\x14SPEECH_TRANSCRIPTION\x10\x06*r\n\x12LabelDetectionMode\x12$\n LABEL_DETECTION_MODE_UNSPECIFIED\x10\x00\x12\r\n\tSHOT_MODE\x10\x01\x12\x0e\n\nFRAME_MODE\x10\x02\x12\x17\n\x13SHOT_AND_FRAME_MODE\x10\x03*t\n\nLikelihood\x12\x1a\n\x16LIKELIHOOD_UNSPECIFIED\x10\x00\x12\x11\n\rVERY_UNLIKELY\x10\x01\x12\x0c\n\x08UNLIKELY\x10\x02\x12\x0c\n\x08POSSIBLE\x10\x03\x12\n\n\x06LIKELY\x10\x04\x12\x0f\n\x0bVERY_LIKELY\x10\x05\x32\xce\x02\n\x18VideoIntelligenceService\x12\xdb\x01\n\rAnnotateVideo\x12>.google.cloud.videointelligence.v1p1beta1.AnnotateVideoRequest\x1a\x1d.google.longrunning.Operation"k\x82\xd3\xe4\x93\x02\x1f"\x1a/v1p1beta1/videos:annotate:\x01*\xda\x41\x12input_uri,features\xca\x41.\n\x15\x41nnotateVideoResponse\x12\x15\x41nnotateVideoProgress\x1aT\xca\x41 videointelligence.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\xae\x02\n,com.google.cloud.videointelligence.v1p1beta1B\x1dVideoIntelligenceServiceProtoP\x01ZYgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1p1beta1;videointelligence\xaa\x02(Google.Cloud.VideoIntelligence.V1P1Beta1\xca\x02(Google\\Cloud\\VideoIntelligence\\V1p1beta1\xea\x02+Google::Cloud::VideoIntelligence::V1p1beta1b\x06proto3' ), dependencies=[ google_dot_api_dot_annotations__pb2.DESCRIPTOR, + google_dot_api_dot_client__pb2.DESCRIPTOR, + google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, google_dot_longrunning_dot_operations__pb2.DESCRIPTOR, google_dot_protobuf_dot_duration__pb2.DESCRIPTOR, google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, @@ -88,8 +92,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=3959, - serialized_end=4099, + serialized_start=4072, + serialized_end=4212, ) _sym_db.RegisterEnumDescriptor(_FEATURE) @@ -123,8 +127,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=4101, - serialized_end=4215, + serialized_start=4214, + serialized_end=4328, ) _sym_db.RegisterEnumDescriptor(_LABELDETECTIONMODE) @@ -160,8 +164,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=4217, - serialized_end=4333, + serialized_start=4330, + serialized_end=4446, ) _sym_db.RegisterEnumDescriptor(_LIKELIHOOD) @@ -241,7 +245,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\002"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -277,7 +281,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -295,7 +299,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), ], @@ -307,8 +311,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=275, - serialized_end=528, + serialized_start=333, + serialized_end=601, ) @@ -418,8 +422,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=531, - serialized_end=1045, + serialized_start=604, + serialized_end=1118, ) @@ -493,8 +497,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1048, - serialized_end=1204, + serialized_start=1121, + serialized_end=1277, ) @@ -532,8 +536,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1206, 
- serialized_end=1248, + serialized_start=1279, + serialized_end=1321, ) @@ -571,8 +575,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1250, - serialized_end=1297, + serialized_start=1323, + serialized_end=1370, ) @@ -628,8 +632,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1299, - serialized_end=1419, + serialized_start=1372, + serialized_end=1492, ) @@ -685,8 +689,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1421, - serialized_end=1528, + serialized_start=1494, + serialized_end=1601, ) @@ -742,8 +746,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1530, - serialized_end=1610, + serialized_start=1603, + serialized_end=1683, ) @@ -817,8 +821,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1612, - serialized_end=1683, + serialized_start=1685, + serialized_end=1756, ) @@ -910,8 +914,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1686, - serialized_end=1990, + serialized_start=1759, + serialized_end=2063, ) @@ -967,8 +971,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1993, - serialized_end=2149, + serialized_start=2066, + serialized_end=2222, ) @@ -1006,8 +1010,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2151, - serialized_end=2258, + serialized_start=2224, + serialized_end=2331, ) @@ -1171,8 +1175,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2261, - serialized_end=2890, + serialized_start=2334, + serialized_end=2963, ) @@ -1210,8 +1214,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2892, - serialized_end=3009, + serialized_start=2965, + serialized_end=3082, ) @@ -1303,8 +1307,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3012, - serialized_end=3179, + serialized_start=3085, + serialized_end=3252, ) @@ -1342,8 +1346,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3181, - serialized_end=3300, + serialized_start=3254, + serialized_end=3373, ) @@ -1369,7 +1373,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\002"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1387,7 +1391,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1405,7 +1409,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1423,7 +1427,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1441,7 +1445,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1459,7 +1463,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), ], @@ -1471,8 +1475,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3303, - serialized_end=3547, + serialized_start=3376, + serialized_end=3650, ) @@ -1498,7 +1502,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + 
serialized_options=_b("\340A\001"), file=DESCRIPTOR, ) ], @@ -1510,8 +1514,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3549, - serialized_end=3581, + serialized_start=3652, + serialized_end=3689, ) @@ -1549,8 +1553,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3583, - serialized_end=3698, + serialized_start=3691, + serialized_end=3806, ) @@ -1594,7 +1598,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\003"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1624,8 +1628,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3701, - serialized_end=3838, + serialized_start=3809, + serialized_end=3951, ) @@ -1699,8 +1703,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3840, - serialized_end=3956, + serialized_start=3953, + serialized_end=4069, ) _ANNOTATEVIDEOREQUEST.fields_by_name["features"].enum_type = _FEATURE @@ -1848,11 +1852,11 @@ specified via ``input_uri``. If set, ``input_uri`` should be unset. features: - Requested video annotation features. + Required. Requested video annotation features. video_context: Additional video context and/or feature-specific parameters. output_uri: - Optional location where the output (in JSON format) should be + Optional. Location where the output (in JSON format) should be stored. Currently, only `Google Cloud Storage `__ URIs are supported, which must be specified in the following format: @@ -1861,7 +1865,7 @@ ]). For more information, see `Request URIs `__. location_id: - Optional cloud region where annotation should take place. + Optional. Cloud region where annotation should take place. Supported cloud regions: ``us-east1``, ``us-west1``, ``europe- west1``, ``asia-east1``. If no region is specified, a region will be determined based on video file location. @@ -1993,7 +1997,8 @@ dict( DESCRIPTOR=_LABELSEGMENT, __module__="google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", - __doc__="""Video segment level annotation results for label detection. + __doc__="""Video segment level annotation results for label + detection. Attributes: @@ -2106,9 +2111,9 @@ dict( DESCRIPTOR=_EXPLICITCONTENTANNOTATION, __module__="google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", - __doc__="""Explicit content annotation (based on per-frame visual signals only). If - no explicit content has been detected in a frame, no annotations are - present for that frame. + __doc__="""Explicit content annotation (based on per-frame visual + signals only). If no explicit content has been detected in a frame, no + annotations are present for that frame. Attributes: @@ -2164,8 +2169,8 @@ dict( DESCRIPTOR=_ANNOTATEVIDEORESPONSE, __module__="google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", - __doc__="""Video annotation response. Included in the ``response`` field of the - ``Operation`` returned by the ``GetOperation`` call of the + __doc__="""Video annotation response. Included in the ``response`` + field of the ``Operation`` returned by the ``GetOperation`` call of the ``google::longrunning::Operations`` service. @@ -2211,8 +2216,8 @@ dict( DESCRIPTOR=_ANNOTATEVIDEOPROGRESS, __module__="google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", - __doc__="""Video annotation progress. Included in the ``metadata`` field of the - ``Operation`` returned by the ``GetOperation`` call of the + __doc__="""Video annotation progress. 
Included in the ``metadata`` + field of the ``Operation`` returned by the ``GetOperation`` call of the ``google::longrunning::Operations`` service. @@ -2237,30 +2242,30 @@ Attributes: language_code: - *Required* The language of the supplied audio as a `BCP-47 - `__ language - tag. Example: "en-US". See `Language Support + Required. *Required* The language of the supplied audio as a + `BCP-47 `__ + language tag. Example: "en-US". See `Language Support `__ for a list of the currently supported language codes. max_alternatives: - *Optional* Maximum number of recognition hypotheses to be + Optional. Maximum number of recognition hypotheses to be returned. Specifically, the maximum number of ``SpeechRecognitionAlternative`` messages within each - ``SpeechRecognitionResult``. The server may return fewer than + ``SpeechTranscription``. The server may return fewer than ``max_alternatives``. Valid values are ``0``-``30``. A value of ``0`` or ``1`` will return a maximum of one. If omitted, will return a maximum of one. filter_profanity: - *Optional* If set to ``true``, the server will attempt to + Optional. If set to ``true``, the server will attempt to filter out profanities, replacing all but the initial character in each filtered word with asterisks, e.g. "f\*\*\*". If set to ``false`` or omitted, profanities won't be filtered out. speech_contexts: - *Optional* A means to provide context to assist the speech + Optional. A means to provide context to assist the speech recognition. enable_automatic_punctuation: - *Optional* If 'true', adds punctuation to recognition result + Optional. If 'true', adds punctuation to recognition result hypotheses. This feature is only available in select languages. Setting this for requests in other languages has no effect at all. The default 'false' value does not add @@ -2269,7 +2274,7 @@ users. In the future this may be exclusively available as a premium feature." audio_tracks: - *Optional* For file formats, such as MXF or MKV, supporting + Optional. For file formats, such as MXF or MKV, supporting multiple audio tracks, specify up to two tracks. Default: track 0. """, @@ -2284,13 +2289,13 @@ dict( DESCRIPTOR=_SPEECHCONTEXT, __module__="google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", - __doc__="""Provides "hints" to the speech recognizer to favor specific words and - phrases in the results. + __doc__="""Provides "hints" to the speech recognizer to favor + specific words and phrases in the results. Attributes: phrases: - *Optional* A list of strings containing words and phrases + Optional. A list of strings containing words and phrases "hints" so that the speech recognition is more likely to recognize them. This can be used to improve the accuracy for specific words and phrases, for example, if specific commands @@ -2310,15 +2315,16 @@ dict( DESCRIPTOR=_SPEECHTRANSCRIPTION, __module__="google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", - __doc__="""A speech recognition result corresponding to a portion of the audio. + __doc__="""A speech recognition result corresponding to a portion of + the audio. Attributes: alternatives: - Output only. May contain one or more recognition hypotheses - (up to the maximum specified in ``max_alternatives``). These - alternatives are ordered in terms of accuracy, with the top - (first) alternative being the most probable, as ranked by the + May contain one or more recognition hypotheses (up to the + maximum specified in ``max_alternatives``). 
These alternatives + are ordered in terms of accuracy, with the top (first) + alternative being the most probable, as ranked by the recognizer. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p1beta1.SpeechTranscription) @@ -2342,11 +2348,10 @@ confidence: Output only. The confidence estimate between 0.0 and 1.0. A higher number indicates an estimated greater likelihood that - the recognized words are correct. This field is typically - provided only for the top hypothesis, and only for - ``is_final=true`` results. Clients should not rely on the - ``confidence`` field as it is not guaranteed to be accurate or - consistent. The default of 0.0 is a sentinel value indicating + the recognized words are correct. This field is set only for + the top alternative. This field is not guaranteed to be + accurate and users should not rely on it to be always + provided. The default of 0.0 is a sentinel value indicating ``confidence`` was not set. words: Output only. A list of word-specific information for each @@ -2363,9 +2368,9 @@ dict( DESCRIPTOR=_WORDINFO, __module__="google.cloud.videointelligence_v1p1beta1.proto.video_intelligence_pb2", - __doc__="""Word-specific information for recognized words. Word information is only - included in the response when certain request parameters are set, such - as ``enable_word_time_offsets``. + __doc__="""Word-specific information for recognized words. Word + information is only included in the response when certain request + parameters are set, such as ``enable_word_time_offsets``. Attributes: @@ -2392,15 +2397,30 @@ DESCRIPTOR._options = None +_ANNOTATEVIDEOREQUEST.fields_by_name["features"]._options = None +_ANNOTATEVIDEOREQUEST.fields_by_name["output_uri"]._options = None +_ANNOTATEVIDEOREQUEST.fields_by_name["location_id"]._options = None +_SPEECHTRANSCRIPTIONCONFIG.fields_by_name["language_code"]._options = None +_SPEECHTRANSCRIPTIONCONFIG.fields_by_name["max_alternatives"]._options = None +_SPEECHTRANSCRIPTIONCONFIG.fields_by_name["filter_profanity"]._options = None +_SPEECHTRANSCRIPTIONCONFIG.fields_by_name["speech_contexts"]._options = None +_SPEECHTRANSCRIPTIONCONFIG.fields_by_name[ + "enable_automatic_punctuation" +]._options = None +_SPEECHTRANSCRIPTIONCONFIG.fields_by_name["audio_tracks"]._options = None +_SPEECHCONTEXT.fields_by_name["phrases"]._options = None +_SPEECHRECOGNITIONALTERNATIVE.fields_by_name["confidence"]._options = None _VIDEOINTELLIGENCESERVICE = _descriptor.ServiceDescriptor( name="VideoIntelligenceService", full_name="google.cloud.videointelligence.v1p1beta1.VideoIntelligenceService", file=DESCRIPTOR, index=0, - serialized_options=None, - serialized_start=4336, - serialized_end=4514, + serialized_options=_b( + "\312A videointelligence.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform" + ), + serialized_start=4449, + serialized_end=4783, methods=[ _descriptor.MethodDescriptor( name="AnnotateVideo", @@ -2410,7 +2430,7 @@ input_type=_ANNOTATEVIDEOREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, serialized_options=_b( - '\202\323\344\223\002\037"\032/v1p1beta1/videos:annotate:\001*' + '\202\323\344\223\002\037"\032/v1p1beta1/videos:annotate:\001*\332A\022input_uri,features\312A.\n\025AnnotateVideoResponse\022\025AnnotateVideoProgress' ), ) ], diff --git a/google/cloud/videointelligence_v1p1beta1/types.py b/google/cloud/videointelligence_v1p1beta1/types.py index a1ff8767..ea57ce8f 100644 --- a/google/cloud/videointelligence_v1p1beta1/types.py +++ 
b/google/cloud/videointelligence_v1p1beta1/types.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/videointelligence_v1p2beta1/__init__.py b/google/cloud/videointelligence_v1p2beta1/__init__.py index bb08b22f..65d0bec6 100644 --- a/google/cloud/videointelligence_v1p2beta1/__init__.py +++ b/google/cloud/videointelligence_v1p2beta1/__init__.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -16,6 +16,8 @@ from __future__ import absolute_import +import sys +import warnings from google.cloud.videointelligence_v1p2beta1 import types from google.cloud.videointelligence_v1p2beta1.gapic import enums @@ -24,6 +26,15 @@ ) +if sys.version_info[:2] == (2, 7): + message = ( + "A future version of this library will drop support for Python 2.7." + "More details about Python 2 support for Google Cloud Client Libraries" + "can be found at https://cloud.google.com/python/docs/python2-sunset/" + ) + warnings.warn(message, DeprecationWarning) + + class VideoIntelligenceServiceClient( video_intelligence_service_client.VideoIntelligenceServiceClient ): diff --git a/google/cloud/videointelligence_v1p2beta1/gapic/enums.py b/google/cloud/videointelligence_v1p2beta1/gapic/enums.py index 4d0ad701..71ecdf88 100644 --- a/google/cloud/videointelligence_v1p2beta1/gapic/enums.py +++ b/google/cloud/videointelligence_v1p2beta1/gapic/enums.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/videointelligence_v1p2beta1/gapic/transports/video_intelligence_service_grpc_transport.py b/google/cloud/videointelligence_v1p2beta1/gapic/transports/video_intelligence_service_grpc_transport.py index b8eb33c1..32f3b061 100644 --- a/google/cloud/videointelligence_v1p2beta1/gapic/transports/video_intelligence_service_grpc_transport.py +++ b/google/cloud/videointelligence_v1p2beta1/gapic/transports/video_intelligence_service_grpc_transport.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/videointelligence_v1p2beta1/gapic/video_intelligence_service_client.py b/google/cloud/videointelligence_v1p2beta1/gapic/video_intelligence_service_client.py index 323f959a..acc1fd42 100644 --- a/google/cloud/videointelligence_v1p2beta1/gapic/video_intelligence_service_client.py +++ b/google/cloud/videointelligence_v1p2beta1/gapic/video_intelligence_service_client.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -243,19 +243,19 @@ def annotate_video( request as ``input_content``. If set, ``input_content`` should be unset. input_content (bytes): The video data bytes. If unset, the input video(s) should be specified via ``input_uri``. 
If set, ``input_uri`` should be unset. - features (list[~google.cloud.videointelligence_v1p2beta1.types.Feature]): Requested video annotation features. + features (list[~google.cloud.videointelligence_v1p2beta1.types.Feature]): Required. Requested video annotation features. video_context (Union[dict, ~google.cloud.videointelligence_v1p2beta1.types.VideoContext]): Additional video context and/or feature-specific parameters. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.videointelligence_v1p2beta1.types.VideoContext` - output_uri (str): Optional location where the output (in JSON format) should be stored. + output_uri (str): Optional. Location where the output (in JSON format) should be stored. Currently, only `Google Cloud Storage `__ URIs are supported, which must be specified in the following format: ``gs://bucket-id/object-id`` (other URI formats return ``google.rpc.Code.INVALID_ARGUMENT``). For more information, see `Request URIs `__. - location_id (str): Optional cloud region where annotation should take place. Supported + location_id (str): Optional. Cloud region where annotation should take place. Supported cloud regions: ``us-east1``, ``us-west1``, ``europe-west1``, ``asia-east1``. If no region is specified, a region will be determined based on video file location. diff --git a/google/cloud/videointelligence_v1p2beta1/proto/video_intelligence.proto b/google/cloud/videointelligence_v1p2beta1/proto/video_intelligence.proto index 0a16e7af..044233b0 100644 --- a/google/cloud/videointelligence_v1p2beta1/proto/video_intelligence.proto +++ b/google/cloud/videointelligence_v1p2beta1/proto/video_intelligence.proto @@ -1,4 +1,4 @@ -// Copyright 2018 Google LLC. +// Copyright 2019 Google LLC. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -18,6 +18,8 @@ syntax = "proto3"; package google.cloud.videointelligence.v1p2beta1; import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; import "google/longrunning/operations.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/timestamp.proto"; @@ -33,16 +35,23 @@ option ruby_package = "Google::Cloud::VideoIntelligence::V1p2beta1"; // Service that implements Google Cloud Video Intelligence API. service VideoIntelligenceService { + option (google.api.default_host) = "videointelligence.googleapis.com"; + option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform"; + // Performs asynchronous video annotation. Progress and results can be // retrieved through the `google.longrunning.Operations` interface. // `Operation.metadata` contains `AnnotateVideoProgress` (progress). // `Operation.response` contains `AnnotateVideoResponse` (results). 
- rpc AnnotateVideo(AnnotateVideoRequest) - returns (google.longrunning.Operation) { + rpc AnnotateVideo(AnnotateVideoRequest) returns (google.longrunning.Operation) { option (google.api.http) = { post: "/v1p2beta1/videos:annotate" body: "*" }; + option (google.api.method_signature) = "input_uri,features"; + option (google.longrunning.operation_info) = { + response_type: "AnnotateVideoResponse" + metadata_type: "AnnotateVideoProgress" + }; } } @@ -52,10 +61,10 @@ message AnnotateVideoRequest { // [Google Cloud Storage](https://cloud.google.com/storage/) URIs are // supported, which must be specified in the following format: // `gs://bucket-id/object-id` (other URI formats return - // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For - // more information, see [Request URIs](/storage/docs/reference-uris). A video - // URI may include wildcards in `object-id`, and thus identify multiple - // videos. Supported wildcards: '*' to match 0 or more characters; + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see + // [Request URIs](/storage/docs/reference-uris). + // A video URI may include wildcards in `object-id`, and thus identify + // multiple videos. Supported wildcards: '*' to match 0 or more characters; // '?' to match 1 character. If unset, the input video should be embedded // in the request as `input_content`. If set, `input_content` should be unset. string input_uri = 1; @@ -65,24 +74,24 @@ message AnnotateVideoRequest { // If set, `input_uri` should be unset. bytes input_content = 6; - // Requested video annotation features. - repeated Feature features = 2; + // Required. Requested video annotation features. + repeated Feature features = 2 [(google.api.field_behavior) = REQUIRED]; // Additional video context and/or feature-specific parameters. VideoContext video_context = 3; - // Optional location where the output (in JSON format) should be stored. + // Optional. Location where the output (in JSON format) should be stored. // Currently, only [Google Cloud Storage](https://cloud.google.com/storage/) // URIs are supported, which must be specified in the following format: // `gs://bucket-id/object-id` (other URI formats return - // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For - // more information, see [Request URIs](/storage/docs/reference-uris). - string output_uri = 4; + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see + // [Request URIs](/storage/docs/reference-uris). + string output_uri = 4 [(google.api.field_behavior) = OPTIONAL]; - // Optional cloud region where annotation should take place. Supported cloud + // Optional. Cloud region where annotation should take place. Supported cloud // regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region // is specified, a region will be determined based on video file location. - string location_id = 5; + string location_id = 5 [(google.api.field_behavior) = OPTIONAL]; } // Video context and/or feature-specific parameters. 
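The request surface annotated above (required `features`, optional `output_uri` and `location_id`, plus the `input_uri,features` method signature) is easiest to see from the caller's side. Below is a minimal sketch against the v1p2beta1 client touched in this patch, assuming default application credentials; the bucket and file name are illustrative placeholders, not values taken from this change.

    from google.cloud import videointelligence_v1p2beta1 as videointelligence

    client = videointelligence.VideoIntelligenceServiceClient()

    # `features` is the only required argument per the new field_behavior
    # annotations; `output_uri` and `location_id` remain optional.
    operation = client.annotate_video(
        input_uri="gs://your-bucket/your-video.mp4",  # placeholder URI
        features=[videointelligence.enums.Feature.LABEL_DETECTION],
    )

    # AnnotateVideo is long-running: Operation.metadata carries
    # AnnotateVideoProgress and Operation.response carries AnnotateVideoResponse,
    # matching the operation_info option added to the proto above.
    result = operation.result(timeout=300)
    for label in result.annotation_results[0].segment_label_annotations:
        print(label.entity.description)
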
diff --git a/google/cloud/videointelligence_v1p2beta1/proto/video_intelligence_pb2.py b/google/cloud/videointelligence_v1p2beta1/proto/video_intelligence_pb2.py index 73e3c2d5..12f11e1a 100644 --- a/google/cloud/videointelligence_v1p2beta1/proto/video_intelligence_pb2.py +++ b/google/cloud/videointelligence_v1p2beta1/proto/video_intelligence_pb2.py @@ -17,6 +17,8 @@ from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 +from google.api import client_pb2 as google_dot_api_dot_client__pb2 +from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 from google.longrunning import ( operations_pb2 as google_dot_longrunning_dot_operations__pb2, ) @@ -33,10 +35,12 @@ "\n,com.google.cloud.videointelligence.v1p2beta1B\035VideoIntelligenceServiceProtoP\001ZYgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1p2beta1;videointelligence\252\002(Google.Cloud.VideoIntelligence.V1P2Beta1\312\002(Google\\Cloud\\VideoIntelligence\\V1p2beta1\352\002+Google::Cloud::VideoIntelligence::V1p2beta1" ), serialized_pb=_b( - '\nGgoogle/cloud/videointelligence_v1p2beta1/proto/video_intelligence.proto\x12(google.cloud.videointelligence.v1p2beta1\x1a\x1cgoogle/api/annotations.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto"\xfd\x01\n\x14\x41nnotateVideoRequest\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x15\n\rinput_content\x18\x06 \x01(\x0c\x12\x43\n\x08\x66\x65\x61tures\x18\x02 \x03(\x0e\x32\x31.google.cloud.videointelligence.v1p2beta1.Feature\x12M\n\rvideo_context\x18\x03 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p2beta1.VideoContext\x12\x12\n\noutput_uri\x18\x04 \x01(\t\x12\x13\n\x0blocation_id\x18\x05 \x01(\t"\xf6\x03\n\x0cVideoContext\x12H\n\x08segments\x18\x01 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p2beta1.VideoSegment\x12^\n\x16label_detection_config\x18\x02 \x01(\x0b\x32>.google.cloud.videointelligence.v1p2beta1.LabelDetectionConfig\x12i\n\x1cshot_change_detection_config\x18\x03 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p2beta1.ShotChangeDetectionConfig\x12s\n!explicit_content_detection_config\x18\x04 \x01(\x0b\x32H.google.cloud.videointelligence.v1p2beta1.ExplicitContentDetectionConfig\x12\\\n\x15text_detection_config\x18\x08 \x01(\x0b\x32=.google.cloud.videointelligence.v1p2beta1.TextDetectionConfig"\x9c\x01\n\x14LabelDetectionConfig\x12Z\n\x14label_detection_mode\x18\x01 \x01(\x0e\x32<.google.cloud.videointelligence.v1p2beta1.LabelDetectionMode\x12\x19\n\x11stationary_camera\x18\x02 \x01(\x08\x12\r\n\x05model\x18\x03 \x01(\t"*\n\x19ShotChangeDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"/\n\x1e\x45xplicitContentDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"-\n\x13TextDetectionConfig\x12\x16\n\x0elanguage_hints\x18\x01 \x03(\t"x\n\x0cVideoSegment\x12\x34\n\x11start_time_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x32\n\x0f\x65nd_time_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"k\n\x0cLabelSegment\x12G\n\x07segment\x18\x01 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p2beta1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02"P\n\nLabelFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x12\n\nconfidence\x18\x02 \x01(\x02"G\n\x06\x45ntity\x12\x11\n\tentity_id\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x15\n\rlanguage_code\x18\x03 \x01(\t"\xb0\x02\n\x0fLabelAnnotation\x12@\n\x06\x65ntity\x18\x01 
\x01(\x0b\x32\x30.google.cloud.videointelligence.v1p2beta1.Entity\x12K\n\x11\x63\x61tegory_entities\x18\x02 \x03(\x0b\x32\x30.google.cloud.videointelligence.v1p2beta1.Entity\x12H\n\x08segments\x18\x03 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p2beta1.LabelSegment\x12\x44\n\x06\x66rames\x18\x04 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1p2beta1.LabelFrame"\x9c\x01\n\x14\x45xplicitContentFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12T\n\x16pornography_likelihood\x18\x02 \x01(\x0e\x32\x34.google.cloud.videointelligence.v1p2beta1.Likelihood"k\n\x19\x45xplicitContentAnnotation\x12N\n\x06\x66rames\x18\x01 \x03(\x0b\x32>.google.cloud.videointelligence.v1p2beta1.ExplicitContentFrame"Q\n\x15NormalizedBoundingBox\x12\x0c\n\x04left\x18\x01 \x01(\x02\x12\x0b\n\x03top\x18\x02 \x01(\x02\x12\r\n\x05right\x18\x03 \x01(\x02\x12\x0e\n\x06\x62ottom\x18\x04 \x01(\x02"\xcb\x05\n\x16VideoAnnotationResults\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\\\n\x19segment_label_annotations\x18\x02 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p2beta1.LabelAnnotation\x12Y\n\x16shot_label_annotations\x18\x03 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p2beta1.LabelAnnotation\x12Z\n\x17\x66rame_label_annotations\x18\x04 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p2beta1.LabelAnnotation\x12P\n\x10shot_annotations\x18\x06 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p2beta1.VideoSegment\x12`\n\x13\x65xplicit_annotation\x18\x07 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p2beta1.ExplicitContentAnnotation\x12R\n\x10text_annotations\x18\x0c \x03(\x0b\x32\x38.google.cloud.videointelligence.v1p2beta1.TextAnnotation\x12^\n\x12object_annotations\x18\x0e \x03(\x0b\x32\x42.google.cloud.videointelligence.v1p2beta1.ObjectTrackingAnnotation\x12!\n\x05\x65rror\x18\t \x01(\x0b\x32\x12.google.rpc.Status"u\n\x15\x41nnotateVideoResponse\x12\\\n\x12\x61nnotation_results\x18\x01 \x03(\x0b\x32@.google.cloud.videointelligence.v1p2beta1.VideoAnnotationResults"\xa7\x01\n\x17VideoAnnotationProgress\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x18\n\x10progress_percent\x18\x02 \x01(\x05\x12.\n\nstart_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"w\n\x15\x41nnotateVideoProgress\x12^\n\x13\x61nnotation_progress\x18\x01 \x03(\x0b\x32\x41.google.cloud.videointelligence.v1p2beta1.VideoAnnotationProgress"(\n\x10NormalizedVertex\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02"f\n\x16NormalizedBoundingPoly\x12L\n\x08vertices\x18\x01 \x03(\x0b\x32:.google.cloud.videointelligence.v1p2beta1.NormalizedVertex"\xaf\x01\n\x0bTextSegment\x12G\n\x07segment\x18\x01 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p2beta1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12\x43\n\x06\x66rames\x18\x03 \x03(\x0b\x32\x33.google.cloud.videointelligence.v1p2beta1.TextFrame"\x9b\x01\n\tTextFrame\x12^\n\x14rotated_bounding_box\x18\x01 \x01(\x0b\x32@.google.cloud.videointelligence.v1p2beta1.NormalizedBoundingPoly\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"g\n\x0eTextAnnotation\x12\x0c\n\x04text\x18\x01 \x01(\t\x12G\n\x08segments\x18\x02 \x03(\x0b\x32\x35.google.cloud.videointelligence.v1p2beta1.TextSegment"\xa7\x01\n\x13ObjectTrackingFrame\x12`\n\x17normalized_bounding_box\x18\x01 \x01(\x0b\x32?.google.cloud.videointelligence.v1p2beta1.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 
\x01(\x0b\x32\x19.google.protobuf.Duration"\x88\x02\n\x18ObjectTrackingAnnotation\x12@\n\x06\x65ntity\x18\x01 \x01(\x0b\x32\x30.google.cloud.videointelligence.v1p2beta1.Entity\x12\x12\n\nconfidence\x18\x04 \x01(\x02\x12M\n\x06\x66rames\x18\x02 \x03(\x0b\x32=.google.cloud.videointelligence.v1p2beta1.ObjectTrackingFrame\x12G\n\x07segment\x18\x03 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p2beta1.VideoSegment*\x9b\x01\n\x07\x46\x65\x61ture\x12\x17\n\x13\x46\x45\x41TURE_UNSPECIFIED\x10\x00\x12\x13\n\x0fLABEL_DETECTION\x10\x01\x12\x19\n\x15SHOT_CHANGE_DETECTION\x10\x02\x12\x1e\n\x1a\x45XPLICIT_CONTENT_DETECTION\x10\x03\x12\x12\n\x0eTEXT_DETECTION\x10\x07\x12\x13\n\x0fOBJECT_TRACKING\x10\t*r\n\x12LabelDetectionMode\x12$\n LABEL_DETECTION_MODE_UNSPECIFIED\x10\x00\x12\r\n\tSHOT_MODE\x10\x01\x12\x0e\n\nFRAME_MODE\x10\x02\x12\x17\n\x13SHOT_AND_FRAME_MODE\x10\x03*t\n\nLikelihood\x12\x1a\n\x16LIKELIHOOD_UNSPECIFIED\x10\x00\x12\x11\n\rVERY_UNLIKELY\x10\x01\x12\x0c\n\x08UNLIKELY\x10\x02\x12\x0c\n\x08POSSIBLE\x10\x03\x12\n\n\x06LIKELY\x10\x04\x12\x0f\n\x0bVERY_LIKELY\x10\x05\x32\xb2\x01\n\x18VideoIntelligenceService\x12\x95\x01\n\rAnnotateVideo\x12>.google.cloud.videointelligence.v1p2beta1.AnnotateVideoRequest\x1a\x1d.google.longrunning.Operation"%\x82\xd3\xe4\x93\x02\x1f"\x1a/v1p2beta1/videos:annotate:\x01*B\xae\x02\n,com.google.cloud.videointelligence.v1p2beta1B\x1dVideoIntelligenceServiceProtoP\x01ZYgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1p2beta1;videointelligence\xaa\x02(Google.Cloud.VideoIntelligence.V1P2Beta1\xca\x02(Google\\Cloud\\VideoIntelligence\\V1p2beta1\xea\x02+Google::Cloud::VideoIntelligence::V1p2beta1b\x06proto3' + '\nGgoogle/cloud/videointelligence_v1p2beta1/proto/video_intelligence.proto\x12(google.cloud.videointelligence.v1p2beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto"\x8c\x02\n\x14\x41nnotateVideoRequest\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x15\n\rinput_content\x18\x06 \x01(\x0c\x12H\n\x08\x66\x65\x61tures\x18\x02 \x03(\x0e\x32\x31.google.cloud.videointelligence.v1p2beta1.FeatureB\x03\xe0\x41\x02\x12M\n\rvideo_context\x18\x03 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p2beta1.VideoContext\x12\x17\n\noutput_uri\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12\x18\n\x0blocation_id\x18\x05 \x01(\tB\x03\xe0\x41\x01"\xf6\x03\n\x0cVideoContext\x12H\n\x08segments\x18\x01 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p2beta1.VideoSegment\x12^\n\x16label_detection_config\x18\x02 \x01(\x0b\x32>.google.cloud.videointelligence.v1p2beta1.LabelDetectionConfig\x12i\n\x1cshot_change_detection_config\x18\x03 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p2beta1.ShotChangeDetectionConfig\x12s\n!explicit_content_detection_config\x18\x04 \x01(\x0b\x32H.google.cloud.videointelligence.v1p2beta1.ExplicitContentDetectionConfig\x12\\\n\x15text_detection_config\x18\x08 \x01(\x0b\x32=.google.cloud.videointelligence.v1p2beta1.TextDetectionConfig"\x9c\x01\n\x14LabelDetectionConfig\x12Z\n\x14label_detection_mode\x18\x01 \x01(\x0e\x32<.google.cloud.videointelligence.v1p2beta1.LabelDetectionMode\x12\x19\n\x11stationary_camera\x18\x02 \x01(\x08\x12\r\n\x05model\x18\x03 \x01(\t"*\n\x19ShotChangeDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"/\n\x1e\x45xplicitContentDetectionConfig\x12\r\n\x05model\x18\x01 
\x01(\t"-\n\x13TextDetectionConfig\x12\x16\n\x0elanguage_hints\x18\x01 \x03(\t"x\n\x0cVideoSegment\x12\x34\n\x11start_time_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x32\n\x0f\x65nd_time_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"k\n\x0cLabelSegment\x12G\n\x07segment\x18\x01 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p2beta1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02"P\n\nLabelFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x12\n\nconfidence\x18\x02 \x01(\x02"G\n\x06\x45ntity\x12\x11\n\tentity_id\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x15\n\rlanguage_code\x18\x03 \x01(\t"\xb0\x02\n\x0fLabelAnnotation\x12@\n\x06\x65ntity\x18\x01 \x01(\x0b\x32\x30.google.cloud.videointelligence.v1p2beta1.Entity\x12K\n\x11\x63\x61tegory_entities\x18\x02 \x03(\x0b\x32\x30.google.cloud.videointelligence.v1p2beta1.Entity\x12H\n\x08segments\x18\x03 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p2beta1.LabelSegment\x12\x44\n\x06\x66rames\x18\x04 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1p2beta1.LabelFrame"\x9c\x01\n\x14\x45xplicitContentFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12T\n\x16pornography_likelihood\x18\x02 \x01(\x0e\x32\x34.google.cloud.videointelligence.v1p2beta1.Likelihood"k\n\x19\x45xplicitContentAnnotation\x12N\n\x06\x66rames\x18\x01 \x03(\x0b\x32>.google.cloud.videointelligence.v1p2beta1.ExplicitContentFrame"Q\n\x15NormalizedBoundingBox\x12\x0c\n\x04left\x18\x01 \x01(\x02\x12\x0b\n\x03top\x18\x02 \x01(\x02\x12\r\n\x05right\x18\x03 \x01(\x02\x12\x0e\n\x06\x62ottom\x18\x04 \x01(\x02"\xcb\x05\n\x16VideoAnnotationResults\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\\\n\x19segment_label_annotations\x18\x02 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p2beta1.LabelAnnotation\x12Y\n\x16shot_label_annotations\x18\x03 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p2beta1.LabelAnnotation\x12Z\n\x17\x66rame_label_annotations\x18\x04 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p2beta1.LabelAnnotation\x12P\n\x10shot_annotations\x18\x06 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p2beta1.VideoSegment\x12`\n\x13\x65xplicit_annotation\x18\x07 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p2beta1.ExplicitContentAnnotation\x12R\n\x10text_annotations\x18\x0c \x03(\x0b\x32\x38.google.cloud.videointelligence.v1p2beta1.TextAnnotation\x12^\n\x12object_annotations\x18\x0e \x03(\x0b\x32\x42.google.cloud.videointelligence.v1p2beta1.ObjectTrackingAnnotation\x12!\n\x05\x65rror\x18\t \x01(\x0b\x32\x12.google.rpc.Status"u\n\x15\x41nnotateVideoResponse\x12\\\n\x12\x61nnotation_results\x18\x01 \x03(\x0b\x32@.google.cloud.videointelligence.v1p2beta1.VideoAnnotationResults"\xa7\x01\n\x17VideoAnnotationProgress\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x18\n\x10progress_percent\x18\x02 \x01(\x05\x12.\n\nstart_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"w\n\x15\x41nnotateVideoProgress\x12^\n\x13\x61nnotation_progress\x18\x01 \x03(\x0b\x32\x41.google.cloud.videointelligence.v1p2beta1.VideoAnnotationProgress"(\n\x10NormalizedVertex\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02"f\n\x16NormalizedBoundingPoly\x12L\n\x08vertices\x18\x01 \x03(\x0b\x32:.google.cloud.videointelligence.v1p2beta1.NormalizedVertex"\xaf\x01\n\x0bTextSegment\x12G\n\x07segment\x18\x01 
\x01(\x0b\x32\x36.google.cloud.videointelligence.v1p2beta1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12\x43\n\x06\x66rames\x18\x03 \x03(\x0b\x32\x33.google.cloud.videointelligence.v1p2beta1.TextFrame"\x9b\x01\n\tTextFrame\x12^\n\x14rotated_bounding_box\x18\x01 \x01(\x0b\x32@.google.cloud.videointelligence.v1p2beta1.NormalizedBoundingPoly\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"g\n\x0eTextAnnotation\x12\x0c\n\x04text\x18\x01 \x01(\t\x12G\n\x08segments\x18\x02 \x03(\x0b\x32\x35.google.cloud.videointelligence.v1p2beta1.TextSegment"\xa7\x01\n\x13ObjectTrackingFrame\x12`\n\x17normalized_bounding_box\x18\x01 \x01(\x0b\x32?.google.cloud.videointelligence.v1p2beta1.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"\x88\x02\n\x18ObjectTrackingAnnotation\x12@\n\x06\x65ntity\x18\x01 \x01(\x0b\x32\x30.google.cloud.videointelligence.v1p2beta1.Entity\x12\x12\n\nconfidence\x18\x04 \x01(\x02\x12M\n\x06\x66rames\x18\x02 \x03(\x0b\x32=.google.cloud.videointelligence.v1p2beta1.ObjectTrackingFrame\x12G\n\x07segment\x18\x03 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p2beta1.VideoSegment*\x9b\x01\n\x07\x46\x65\x61ture\x12\x17\n\x13\x46\x45\x41TURE_UNSPECIFIED\x10\x00\x12\x13\n\x0fLABEL_DETECTION\x10\x01\x12\x19\n\x15SHOT_CHANGE_DETECTION\x10\x02\x12\x1e\n\x1a\x45XPLICIT_CONTENT_DETECTION\x10\x03\x12\x12\n\x0eTEXT_DETECTION\x10\x07\x12\x13\n\x0fOBJECT_TRACKING\x10\t*r\n\x12LabelDetectionMode\x12$\n LABEL_DETECTION_MODE_UNSPECIFIED\x10\x00\x12\r\n\tSHOT_MODE\x10\x01\x12\x0e\n\nFRAME_MODE\x10\x02\x12\x17\n\x13SHOT_AND_FRAME_MODE\x10\x03*t\n\nLikelihood\x12\x1a\n\x16LIKELIHOOD_UNSPECIFIED\x10\x00\x12\x11\n\rVERY_UNLIKELY\x10\x01\x12\x0c\n\x08UNLIKELY\x10\x02\x12\x0c\n\x08POSSIBLE\x10\x03\x12\n\n\x06LIKELY\x10\x04\x12\x0f\n\x0bVERY_LIKELY\x10\x05\x32\xce\x02\n\x18VideoIntelligenceService\x12\xdb\x01\n\rAnnotateVideo\x12>.google.cloud.videointelligence.v1p2beta1.AnnotateVideoRequest\x1a\x1d.google.longrunning.Operation"k\x82\xd3\xe4\x93\x02\x1f"\x1a/v1p2beta1/videos:annotate:\x01*\xda\x41\x12input_uri,features\xca\x41.\n\x15\x41nnotateVideoResponse\x12\x15\x41nnotateVideoProgress\x1aT\xca\x41 videointelligence.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\xae\x02\n,com.google.cloud.videointelligence.v1p2beta1B\x1dVideoIntelligenceServiceProtoP\x01ZYgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1p2beta1;videointelligence\xaa\x02(Google.Cloud.VideoIntelligence.V1P2Beta1\xca\x02(Google\\Cloud\\VideoIntelligence\\V1p2beta1\xea\x02+Google::Cloud::VideoIntelligence::V1p2beta1b\x06proto3' ), dependencies=[ google_dot_api_dot_annotations__pb2.DESCRIPTOR, + google_dot_api_dot_client__pb2.DESCRIPTOR, + google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, google_dot_longrunning_dot_operations__pb2.DESCRIPTOR, google_dot_protobuf_dot_duration__pb2.DESCRIPTOR, google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, @@ -91,8 +95,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=4531, - serialized_end=4686, + serialized_start=4604, + serialized_end=4759, ) _sym_db.RegisterEnumDescriptor(_FEATURE) @@ -126,8 +130,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=4688, - serialized_end=4802, + serialized_start=4761, + serialized_end=4875, ) _sym_db.RegisterEnumDescriptor(_LABELDETECTIONMODE) @@ -163,8 +167,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=4804, - serialized_end=4920, + serialized_start=4877, + serialized_end=4993, ) 
_sym_db.RegisterEnumDescriptor(_LIKELIHOOD) @@ -245,7 +249,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\002"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -281,7 +285,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -299,7 +303,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), ], @@ -311,8 +315,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=275, - serialized_end=528, + serialized_start=333, + serialized_end=601, ) @@ -422,8 +426,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=531, - serialized_end=1033, + serialized_start=604, + serialized_end=1106, ) @@ -497,8 +501,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1036, - serialized_end=1192, + serialized_start=1109, + serialized_end=1265, ) @@ -536,8 +540,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1194, - serialized_end=1236, + serialized_start=1267, + serialized_end=1309, ) @@ -575,8 +579,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1238, - serialized_end=1285, + serialized_start=1311, + serialized_end=1358, ) @@ -614,8 +618,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1287, - serialized_end=1332, + serialized_start=1360, + serialized_end=1405, ) @@ -671,8 +675,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1334, - serialized_end=1454, + serialized_start=1407, + serialized_end=1527, ) @@ -728,8 +732,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1456, - serialized_end=1563, + serialized_start=1529, + serialized_end=1636, ) @@ -785,8 +789,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1565, - serialized_end=1645, + serialized_start=1638, + serialized_end=1718, ) @@ -860,8 +864,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1647, - serialized_end=1718, + serialized_start=1720, + serialized_end=1791, ) @@ -953,8 +957,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1721, - serialized_end=2025, + serialized_start=1794, + serialized_end=2098, ) @@ -1010,8 +1014,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2028, - serialized_end=2184, + serialized_start=2101, + serialized_end=2257, ) @@ -1049,8 +1053,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2186, - serialized_end=2293, + serialized_start=2259, + serialized_end=2366, ) @@ -1142,8 +1146,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2295, - serialized_end=2376, + serialized_start=2368, + serialized_end=2449, ) @@ -1325,8 +1329,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2379, - serialized_end=3094, + serialized_start=2452, + serialized_end=3167, ) @@ -1364,8 +1368,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3096, - serialized_end=3213, + serialized_start=3169, + serialized_end=3286, ) @@ -1457,8 +1461,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3216, - serialized_end=3383, + serialized_start=3289, + serialized_end=3456, ) @@ -1496,8 +1500,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3385, - 
serialized_end=3504, + serialized_start=3458, + serialized_end=3577, ) @@ -1553,8 +1557,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3506, - serialized_end=3546, + serialized_start=3579, + serialized_end=3619, ) @@ -1592,8 +1596,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3548, - serialized_end=3650, + serialized_start=3621, + serialized_end=3723, ) @@ -1667,8 +1671,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3653, - serialized_end=3828, + serialized_start=3726, + serialized_end=3901, ) @@ -1724,8 +1728,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3831, - serialized_end=3986, + serialized_start=3904, + serialized_end=4059, ) @@ -1781,8 +1785,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3988, - serialized_end=4091, + serialized_start=4061, + serialized_end=4164, ) @@ -1838,8 +1842,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=4094, - serialized_end=4261, + serialized_start=4167, + serialized_end=4334, ) @@ -1931,8 +1935,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=4264, - serialized_end=4528, + serialized_start=4337, + serialized_end=4601, ) _ANNOTATEVIDEOREQUEST.fields_by_name["features"].enum_type = _FEATURE @@ -2087,11 +2091,11 @@ specified via ``input_uri``. If set, ``input_uri`` should be unset. features: - Requested video annotation features. + Required. Requested video annotation features. video_context: Additional video context and/or feature-specific parameters. output_uri: - Optional location where the output (in JSON format) should be + Optional. Location where the output (in JSON format) should be stored. Currently, only `Google Cloud Storage `__ URIs are supported, which must be specified in the following format: @@ -2100,7 +2104,7 @@ ]). For more information, see `Request URIs `__. location_id: - Optional cloud region where annotation should take place. + Optional. Cloud region where annotation should take place. Supported cloud regions: ``us-east1``, ``us-west1``, ``europe- west1``, ``asia-east1``. If no region is specified, a region will be determined based on video file location. @@ -2254,7 +2258,8 @@ dict( DESCRIPTOR=_LABELSEGMENT, __module__="google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", - __doc__="""Video segment level annotation results for label detection. + __doc__="""Video segment level annotation results for label + detection. Attributes: @@ -2367,9 +2372,9 @@ dict( DESCRIPTOR=_EXPLICITCONTENTANNOTATION, __module__="google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", - __doc__="""Explicit content annotation (based on per-frame visual signals only). If - no explicit content has been detected in a frame, no annotations are - present for that frame. + __doc__="""Explicit content annotation (based on per-frame visual + signals only). If no explicit content has been detected in a frame, no + annotations are present for that frame. Attributes: @@ -2387,8 +2392,8 @@ dict( DESCRIPTOR=_NORMALIZEDBOUNDINGBOX, __module__="google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", - __doc__="""Normalized bounding box. The normalized vertex coordinates are relative - to the original image. Range: [0, 1]. + __doc__="""Normalized bounding box. The normalized vertex coordinates + are relative to the original image. Range: [0, 1]. 
Attributes: @@ -2454,8 +2459,8 @@ dict( DESCRIPTOR=_ANNOTATEVIDEORESPONSE, __module__="google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", - __doc__="""Video annotation response. Included in the ``response`` field of the - ``Operation`` returned by the ``GetOperation`` call of the + __doc__="""Video annotation response. Included in the ``response`` + field of the ``Operation`` returned by the ``GetOperation`` call of the ``google::longrunning::Operations`` service. @@ -2501,8 +2506,8 @@ dict( DESCRIPTOR=_ANNOTATEVIDEOPROGRESS, __module__="google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", - __doc__="""Video annotation progress. Included in the ``metadata`` field of the - ``Operation`` returned by the ``GetOperation`` call of the + __doc__="""Video annotation progress. Included in the ``metadata`` + field of the ``Operation`` returned by the ``GetOperation`` call of the ``google::longrunning::Operations`` service. @@ -2540,10 +2545,11 @@ dict( DESCRIPTOR=_NORMALIZEDBOUNDINGPOLY, __module__="google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", - __doc__="""Normalized bounding polygon for text (that might not be aligned with - axis). Contains list of the corner points in clockwise order starting - from top-left corner. For example, for a rectangular bounding box: When - the text is horizontal it might look like: 0----1 \| \| 3----2 + __doc__="""Normalized bounding polygon for text (that might not be + aligned with axis). Contains list of the corner points in clockwise + order starting from top-left corner. For example, for a rectangular + bounding box: When the text is horizontal it might look like: 0----1 \| + \| 3----2 When it's clockwise rotated 180 degrees around the top-left corner it becomes: 2----3 \| \| 1----0 @@ -2592,9 +2598,9 @@ dict( DESCRIPTOR=_TEXTFRAME, __module__="google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", - __doc__="""Video frame level annotation results for text annotation (OCR). Contains - information regarding timestamp and bounding box locations for the - frames containing detected OCR text snippets. + __doc__="""Video frame level annotation results for text annotation + (OCR). Contains information regarding timestamp and bounding box + locations for the frames containing detected OCR text snippets. Attributes: @@ -2636,8 +2642,9 @@ dict( DESCRIPTOR=_OBJECTTRACKINGFRAME, __module__="google.cloud.videointelligence_v1p2beta1.proto.video_intelligence_pb2", - __doc__="""Video frame level annotations for object detection and tracking. This - field stores per frame location, time offset, and confidence. + __doc__="""Video frame level annotations for object detection and + tracking. This field stores per frame location, time offset, and + confidence. 
Attributes: @@ -2681,15 +2688,20 @@ DESCRIPTOR._options = None +_ANNOTATEVIDEOREQUEST.fields_by_name["features"]._options = None +_ANNOTATEVIDEOREQUEST.fields_by_name["output_uri"]._options = None +_ANNOTATEVIDEOREQUEST.fields_by_name["location_id"]._options = None _VIDEOINTELLIGENCESERVICE = _descriptor.ServiceDescriptor( name="VideoIntelligenceService", full_name="google.cloud.videointelligence.v1p2beta1.VideoIntelligenceService", file=DESCRIPTOR, index=0, - serialized_options=None, - serialized_start=4923, - serialized_end=5101, + serialized_options=_b( + "\312A videointelligence.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform" + ), + serialized_start=4996, + serialized_end=5330, methods=[ _descriptor.MethodDescriptor( name="AnnotateVideo", @@ -2699,7 +2711,7 @@ input_type=_ANNOTATEVIDEOREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, serialized_options=_b( - '\202\323\344\223\002\037"\032/v1p2beta1/videos:annotate:\001*' + '\202\323\344\223\002\037"\032/v1p2beta1/videos:annotate:\001*\332A\022input_uri,features\312A.\n\025AnnotateVideoResponse\022\025AnnotateVideoProgress' ), ) ], diff --git a/google/cloud/videointelligence_v1p2beta1/types.py b/google/cloud/videointelligence_v1p2beta1/types.py index 0b70adb8..67ba6696 100644 --- a/google/cloud/videointelligence_v1p2beta1/types.py +++ b/google/cloud/videointelligence_v1p2beta1/types.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/videointelligence_v1p3beta1/__init__.py b/google/cloud/videointelligence_v1p3beta1/__init__.py index fd8f0071..75bb4ebe 100644 --- a/google/cloud/videointelligence_v1p3beta1/__init__.py +++ b/google/cloud/videointelligence_v1p3beta1/__init__.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -16,6 +16,8 @@ from __future__ import absolute_import +import sys +import warnings from google.cloud.videointelligence_v1p3beta1 import types from google.cloud.videointelligence_v1p3beta1.gapic import enums @@ -27,6 +29,15 @@ ) +if sys.version_info[:2] == (2, 7): + message = ( + "A future version of this library will drop support for Python 2.7." + "More details about Python 2 support for Google Cloud Client Libraries" + "can be found at https://cloud.google.com/python/docs/python2-sunset/" + ) + warnings.warn(message, DeprecationWarning) + + class VideoIntelligenceServiceClient( video_intelligence_service_client.VideoIntelligenceServiceClient ): diff --git a/google/cloud/videointelligence_v1p3beta1/gapic/enums.py b/google/cloud/videointelligence_v1p3beta1/gapic/enums.py index 4aa081ac..f32e7a2e 100644 --- a/google/cloud/videointelligence_v1p3beta1/gapic/enums.py +++ b/google/cloud/videointelligence_v1p3beta1/gapic/enums.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -28,22 +28,26 @@ class Feature(enum.IntEnum): LABEL_DETECTION (int): Label detection. Detect objects, such as dog or flower. SHOT_CHANGE_DETECTION (int): Shot change detection. 
EXPLICIT_CONTENT_DETECTION (int): Explicit content detection. + FACE_DETECTION (int): Human face detection. SPEECH_TRANSCRIPTION (int): Speech transcription. TEXT_DETECTION (int): OCR text detection and tracking. OBJECT_TRACKING (int): Object detection and tracking. LOGO_RECOGNITION (int): Logo detection, tracking, and recognition. CELEBRITY_RECOGNITION (int): Celebrity recognition. + PERSON_DETECTION (int): Person detection. """ FEATURE_UNSPECIFIED = 0 LABEL_DETECTION = 1 SHOT_CHANGE_DETECTION = 2 EXPLICIT_CONTENT_DETECTION = 3 + FACE_DETECTION = 4 SPEECH_TRANSCRIPTION = 6 TEXT_DETECTION = 7 OBJECT_TRACKING = 9 LOGO_RECOGNITION = 12 CELEBRITY_RECOGNITION = 13 + PERSON_DETECTION = 14 class LabelDetectionMode(enum.IntEnum): diff --git a/google/cloud/videointelligence_v1p3beta1/gapic/streaming_video_intelligence_service_client.py b/google/cloud/videointelligence_v1p3beta1/gapic/streaming_video_intelligence_service_client.py index 6445359b..e89daf5a 100644 --- a/google/cloud/videointelligence_v1p3beta1/gapic/streaming_video_intelligence_service_client.py +++ b/google/cloud/videointelligence_v1p3beta1/gapic/streaming_video_intelligence_service_client.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/videointelligence_v1p3beta1/gapic/transports/streaming_video_intelligence_service_grpc_transport.py b/google/cloud/videointelligence_v1p3beta1/gapic/transports/streaming_video_intelligence_service_grpc_transport.py index d51a8f14..bc2cba2f 100644 --- a/google/cloud/videointelligence_v1p3beta1/gapic/transports/streaming_video_intelligence_service_grpc_transport.py +++ b/google/cloud/videointelligence_v1p3beta1/gapic/transports/streaming_video_intelligence_service_grpc_transport.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/videointelligence_v1p3beta1/gapic/transports/video_intelligence_service_grpc_transport.py b/google/cloud/videointelligence_v1p3beta1/gapic/transports/video_intelligence_service_grpc_transport.py index ecd2f03c..1b69eb40 100644 --- a/google/cloud/videointelligence_v1p3beta1/gapic/transports/video_intelligence_service_grpc_transport.py +++ b/google/cloud/videointelligence_v1p3beta1/gapic/transports/video_intelligence_service_grpc_transport.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/videointelligence_v1p3beta1/gapic/video_intelligence_service_client.py b/google/cloud/videointelligence_v1p3beta1/gapic/video_intelligence_service_client.py index 0175c889..d94df648 100644 --- a/google/cloud/videointelligence_v1p3beta1/gapic/video_intelligence_service_client.py +++ b/google/cloud/videointelligence_v1p3beta1/gapic/video_intelligence_service_client.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
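Illustrative usage (not part of the generated patch): the Feature additions above and the proto changes below expose FACE_DETECTION and PERSON_DETECTION on the v1p3beta1 surface. A minimal sketch, assuming the regenerated `enums` and `types` modules re-export the new messages as usual; the `gs://` URI and the timeout are placeholders:

    from google.cloud import videointelligence_v1p3beta1 as videointelligence

    client = videointelligence.VideoIntelligenceServiceClient()

    # Request the two new features together with their configs
    # (FaceDetectionConfig / PersonDetectionConfig) added by this change.
    video_context = videointelligence.types.VideoContext(
        face_detection_config=videointelligence.types.FaceDetectionConfig(
            include_bounding_boxes=True, include_attributes=True
        ),
        person_detection_config=videointelligence.types.PersonDetectionConfig(
            include_bounding_boxes=True,
            include_pose_landmarks=True,
            include_attributes=True,
        ),
    )

    operation = client.annotate_video(
        input_uri="gs://YOUR_BUCKET/YOUR_VIDEO.mp4",  # placeholder URI
        features=[
            videointelligence.enums.Feature.FACE_DETECTION,
            videointelligence.enums.Feature.PERSON_DETECTION,
        ],
        video_context=video_context,
    )
    results = operation.result(timeout=600).annotation_results[0]  # placeholder timeout

    # New VideoAnnotationResults fields: face_detection_annotations (field 13)
    # and person_detection_annotations (field 20).
    for face_annotation in results.face_detection_annotations:
        for track in face_annotation.tracks:
            print("face track confidence:", track.confidence)
    for person_annotation in results.person_detection_annotations:
        for track in person_annotation.tracks:
            for obj in track.timestamped_objects:
                # DetectedLandmark entries added to TimestampedObject.landmarks.
                for landmark in obj.landmarks:
                    print(landmark.name, landmark.point.x, landmark.point.y, landmark.confidence)

The accessors above mirror the regenerated messages only; this patch adds no handwritten surface beyond the generated clients, enums, and types.
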
diff --git a/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence.proto b/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence.proto index 1203b315..942f63be 100644 --- a/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence.proto +++ b/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -124,12 +124,18 @@ message VideoContext { // Config for EXPLICIT_CONTENT_DETECTION. ExplicitContentDetectionConfig explicit_content_detection_config = 4; + // Config for FACE_DETECTION. + FaceDetectionConfig face_detection_config = 5; + // Config for SPEECH_TRANSCRIPTION. SpeechTranscriptionConfig speech_transcription_config = 6; // Config for TEXT_DETECTION. TextDetectionConfig text_detection_config = 8; + // Config for PERSON_DETECTION. + PersonDetectionConfig person_detection_config = 11; + // Config for OBJECT_TRACKING. ObjectTrackingConfig object_tracking_config = 13; } @@ -192,6 +198,38 @@ message ExplicitContentDetectionConfig { string model = 1; } +// Config for FACE_DETECTION. +message FaceDetectionConfig { + // Model to use for face detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + string model = 1; + + // Whether bounding boxes are included in the face annotation output. + bool include_bounding_boxes = 2; + + // Whether to enable face attributes detection, such as glasses, dark_glasses, + // mouth_open, etc. Ignored if 'include_bounding_boxes' is false. + bool include_attributes = 5; +} + +// Config for PERSON_DETECTION. +message PersonDetectionConfig { + // Whether bounding boxes are included in the person detection annotation + // output. + bool include_bounding_boxes = 1; + + // Whether to enable pose landmarks detection. Ignored if + // 'include_bounding_boxes' is false. + bool include_pose_landmarks = 2; + + // Whether to enable person attributes detection, such as cloth color (black, + // blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair + // color (black, blonde, etc), hair length (long, short, bald), etc. + // Ignored if 'include_bounding_boxes' is false. + bool include_attributes = 3; +} + // Config for TEXT_DETECTION. message TextDetectionConfig { // Language hint can be specified if the language to be detected is known a @@ -318,6 +356,10 @@ message TimestampedObject { // Optional. The attributes of the object in the bounding box. repeated DetectedAttribute attributes = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The detected landmarks. + repeated DetectedLandmark landmarks = 4 + [(google.api.field_behavior) = OPTIONAL]; } // A track of an object instance. @@ -392,6 +434,35 @@ message CelebrityRecognitionAnnotation { repeated CelebrityTrack celebrity_tracks = 1; } +// A generic detected landmark represented by name in string format and a 2D +// location. +message DetectedLandmark { + // The name of this landmark, e.g. left_hand, right_shoulder. + string name = 1; + + // The 2D point of the detected landmark using the normalized image + // coordinate system. The normalized coordinates have the range from 0 to 1. + NormalizedVertex point = 2; + + // The confidence score of the detected landmark. Range [0, 1]. + float confidence = 3; +} + +// Face detection annotation. 
+message FaceDetectionAnnotation { + // The face tracks with attributes. + repeated Track tracks = 3; + + // The thumbnail of a person's face. + bytes thumbnail = 4; +} + +// Person detection annotation per video. +message PersonDetectionAnnotation { + // The trackes that a person is detected. + repeated Track tracks = 1; +} + // Annotation results for a single video. message VideoAnnotationResults { // Video file location in @@ -428,6 +499,9 @@ message VideoAnnotationResults { // There is exactly one element for each unique label. repeated LabelAnnotation frame_label_annotations = 4; + // Face detection annotations. + repeated FaceDetectionAnnotation face_detection_annotations = 13; + // Shot annotations. Each shot is represented as a video segment. repeated VideoSegment shot_annotations = 6; @@ -448,6 +522,9 @@ message VideoAnnotationResults { // Annotations for list of logos detected, tracked and recognized in video. repeated LogoRecognitionAnnotation logo_recognition_annotations = 19; + // Person detection annotations. + repeated PersonDetectionAnnotation person_detection_annotations = 20; + // Celebrity recognition annotations. CelebrityRecognitionAnnotation celebrity_recognition_annotations = 21; @@ -912,6 +989,9 @@ enum Feature { // Explicit content detection. EXPLICIT_CONTENT_DETECTION = 3; + // Human face detection. + FACE_DETECTION = 4; + // Speech transcription. SPEECH_TRANSCRIPTION = 6; @@ -926,6 +1006,9 @@ enum Feature { // Celebrity recognition. CELEBRITY_RECOGNITION = 13; + + // Person detection. + PERSON_DETECTION = 14; } // Label detection mode. diff --git a/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2.py b/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2.py index fd02c615..5b9680d2 100644 --- a/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2.py +++ b/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2.py @@ -35,7 +35,7 @@ "\n,com.google.cloud.videointelligence.v1p3beta1B\035VideoIntelligenceServiceProtoP\001ZYgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1p3beta1;videointelligence\252\002(Google.Cloud.VideoIntelligence.V1P3Beta1\312\002(Google\\Cloud\\VideoIntelligence\\V1p3beta1" ), serialized_pb=_b( - '\nGgoogle/cloud/videointelligence_v1p3beta1/proto/video_intelligence.proto\x12(google.cloud.videointelligence.v1p3beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto"\x8c\x02\n\x14\x41nnotateVideoRequest\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x15\n\rinput_content\x18\x06 \x01(\x0c\x12H\n\x08\x66\x65\x61tures\x18\x02 \x03(\x0e\x32\x31.google.cloud.videointelligence.v1p3beta1.FeatureB\x03\xe0\x41\x02\x12M\n\rvideo_context\x18\x03 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoContext\x12\x17\n\noutput_uri\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12\x18\n\x0blocation_id\x18\x05 \x01(\tB\x03\xe0\x41\x01"\xc0\x05\n\x0cVideoContext\x12H\n\x08segments\x18\x01 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12^\n\x16label_detection_config\x18\x02 \x01(\x0b\x32>.google.cloud.videointelligence.v1p3beta1.LabelDetectionConfig\x12i\n\x1cshot_change_detection_config\x18\x03 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p3beta1.ShotChangeDetectionConfig\x12s\n!explicit_content_detection_config\x18\x04 
\x01(\x0b\x32H.google.cloud.videointelligence.v1p3beta1.ExplicitContentDetectionConfig\x12h\n\x1bspeech_transcription_config\x18\x06 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p3beta1.SpeechTranscriptionConfig\x12\\\n\x15text_detection_config\x18\x08 \x01(\x0b\x32=.google.cloud.videointelligence.v1p3beta1.TextDetectionConfig\x12^\n\x16object_tracking_config\x18\r \x01(\x0b\x32>.google.cloud.videointelligence.v1p3beta1.ObjectTrackingConfig"\xe4\x01\n\x14LabelDetectionConfig\x12Z\n\x14label_detection_mode\x18\x01 \x01(\x0e\x32<.google.cloud.videointelligence.v1p3beta1.LabelDetectionMode\x12\x19\n\x11stationary_camera\x18\x02 \x01(\x08\x12\r\n\x05model\x18\x03 \x01(\t\x12"\n\x1a\x66rame_confidence_threshold\x18\x04 \x01(\x02\x12"\n\x1avideo_confidence_threshold\x18\x05 \x01(\x02"*\n\x19ShotChangeDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"%\n\x14ObjectTrackingConfig\x12\r\n\x05model\x18\x01 \x01(\t"/\n\x1e\x45xplicitContentDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"<\n\x13TextDetectionConfig\x12\x16\n\x0elanguage_hints\x18\x01 \x03(\t\x12\r\n\x05model\x18\x02 \x01(\t"x\n\x0cVideoSegment\x12\x34\n\x11start_time_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x32\n\x0f\x65nd_time_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"k\n\x0cLabelSegment\x12G\n\x07segment\x18\x01 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02"P\n\nLabelFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x12\n\nconfidence\x18\x02 \x01(\x02"G\n\x06\x45ntity\x12\x11\n\tentity_id\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x15\n\rlanguage_code\x18\x03 \x01(\t"\xb0\x02\n\x0fLabelAnnotation\x12@\n\x06\x65ntity\x18\x01 \x01(\x0b\x32\x30.google.cloud.videointelligence.v1p3beta1.Entity\x12K\n\x11\x63\x61tegory_entities\x18\x02 \x03(\x0b\x32\x30.google.cloud.videointelligence.v1p3beta1.Entity\x12H\n\x08segments\x18\x03 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.LabelSegment\x12\x44\n\x06\x66rames\x18\x04 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1p3beta1.LabelFrame"\x9c\x01\n\x14\x45xplicitContentFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12T\n\x16pornography_likelihood\x18\x02 \x01(\x0e\x32\x34.google.cloud.videointelligence.v1p3beta1.Likelihood"k\n\x19\x45xplicitContentAnnotation\x12N\n\x06\x66rames\x18\x01 \x03(\x0b\x32>.google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame"Q\n\x15NormalizedBoundingBox\x12\x0c\n\x04left\x18\x01 \x01(\x02\x12\x0b\n\x03top\x18\x02 \x01(\x02\x12\r\n\x05right\x18\x03 \x01(\x02\x12\x0e\n\x06\x62ottom\x18\x04 \x01(\x02"\xfb\x01\n\x11TimestampedObject\x12`\n\x17normalized_bounding_box\x18\x01 \x01(\x0b\x32?.google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12T\n\nattributes\x18\x03 \x03(\x0b\x32;.google.cloud.videointelligence.v1p3beta1.DetectedAttributeB\x03\xe0\x41\x01"\x99\x02\n\x05Track\x12G\n\x07segment\x18\x01 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12X\n\x13timestamped_objects\x18\x02 \x03(\x0b\x32;.google.cloud.videointelligence.v1p3beta1.TimestampedObject\x12T\n\nattributes\x18\x03 \x03(\x0b\x32;.google.cloud.videointelligence.v1p3beta1.DetectedAttributeB\x03\xe0\x41\x01\x12\x17\n\nconfidence\x18\x04 \x01(\x02\x42\x03\xe0\x41\x01"D\n\x11\x44\x65tectedAttribute\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\nconfidence\x18\x02 
\x01(\x02\x12\r\n\x05value\x18\x03 \x01(\t"D\n\tCelebrity\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t"\xab\x02\n\x0e\x43\x65lebrityTrack\x12\x61\n\x0b\x63\x65lebrities\x18\x01 \x03(\x0b\x32L.google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity\x12\x43\n\nface_track\x18\x03 \x01(\x0b\x32/.google.cloud.videointelligence.v1p3beta1.Track\x1aq\n\x13RecognizedCelebrity\x12\x46\n\tcelebrity\x18\x01 \x01(\x0b\x32\x33.google.cloud.videointelligence.v1p3beta1.Celebrity\x12\x12\n\nconfidence\x18\x02 \x01(\x02"t\n\x1e\x43\x65lebrityRecognitionAnnotation\x12R\n\x10\x63\x65lebrity_tracks\x18\x01 \x03(\x0b\x32\x38.google.cloud.videointelligence.v1p3beta1.CelebrityTrack"\x9d\n\n\x16VideoAnnotationResults\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12G\n\x07segment\x18\n \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12\\\n\x19segment_label_annotations\x18\x02 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p3beta1.LabelAnnotation\x12\x65\n"segment_presence_label_annotations\x18\x17 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p3beta1.LabelAnnotation\x12Y\n\x16shot_label_annotations\x18\x03 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p3beta1.LabelAnnotation\x12\x62\n\x1fshot_presence_label_annotations\x18\x18 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p3beta1.LabelAnnotation\x12Z\n\x17\x66rame_label_annotations\x18\x04 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p3beta1.LabelAnnotation\x12P\n\x10shot_annotations\x18\x06 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12`\n\x13\x65xplicit_annotation\x18\x07 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation\x12\\\n\x15speech_transcriptions\x18\x0b \x03(\x0b\x32=.google.cloud.videointelligence.v1p3beta1.SpeechTranscription\x12R\n\x10text_annotations\x18\x0c \x03(\x0b\x32\x38.google.cloud.videointelligence.v1p3beta1.TextAnnotation\x12^\n\x12object_annotations\x18\x0e \x03(\x0b\x32\x42.google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation\x12i\n\x1clogo_recognition_annotations\x18\x13 \x03(\x0b\x32\x43.google.cloud.videointelligence.v1p3beta1.LogoRecognitionAnnotation\x12s\n!celebrity_recognition_annotations\x18\x15 \x01(\x0b\x32H.google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation\x12!\n\x05\x65rror\x18\t \x01(\x0b\x32\x12.google.rpc.Status"u\n\x15\x41nnotateVideoResponse\x12\\\n\x12\x61nnotation_results\x18\x01 \x03(\x0b\x32@.google.cloud.videointelligence.v1p3beta1.VideoAnnotationResults"\xb4\x02\n\x17VideoAnnotationProgress\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x18\n\x10progress_percent\x18\x02 \x01(\x05\x12.\n\nstart_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x42\n\x07\x66\x65\x61ture\x18\x05 \x01(\x0e\x32\x31.google.cloud.videointelligence.v1p3beta1.Feature\x12G\n\x07segment\x18\x06 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment"w\n\x15\x41nnotateVideoProgress\x12^\n\x13\x61nnotation_progress\x18\x01 \x03(\x0b\x32\x41.google.cloud.videointelligence.v1p3beta1.VideoAnnotationProgress"\x88\x03\n\x19SpeechTranscriptionConfig\x12\x1a\n\rlanguage_code\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x1d\n\x10max_alternatives\x18\x02 \x01(\x05\x42\x03\xe0\x41\x01\x12\x1d\n\x10\x66ilter_profanity\x18\x03 \x01(\x08\x42\x03\xe0\x41\x01\x12U\n\x0fspeech_contexts\x18\x04 
\x03(\x0b\x32\x37.google.cloud.videointelligence.v1p3beta1.SpeechContextB\x03\xe0\x41\x01\x12)\n\x1c\x65nable_automatic_punctuation\x18\x05 \x01(\x08\x42\x03\xe0\x41\x01\x12\x19\n\x0c\x61udio_tracks\x18\x06 \x03(\x05\x42\x03\xe0\x41\x01\x12\'\n\x1a\x65nable_speaker_diarization\x18\x07 \x01(\x08\x42\x03\xe0\x41\x01\x12&\n\x19\x64iarization_speaker_count\x18\x08 \x01(\x05\x42\x03\xe0\x41\x01\x12#\n\x16\x65nable_word_confidence\x18\t \x01(\x08\x42\x03\xe0\x41\x01"%\n\rSpeechContext\x12\x14\n\x07phrases\x18\x01 \x03(\tB\x03\xe0\x41\x01"\x8f\x01\n\x13SpeechTranscription\x12\\\n\x0c\x61lternatives\x18\x01 \x03(\x0b\x32\x46.google.cloud.videointelligence.v1p3beta1.SpeechRecognitionAlternative\x12\x1a\n\rlanguage_code\x18\x02 \x01(\tB\x03\xe0\x41\x03"\x93\x01\n\x1cSpeechRecognitionAlternative\x12\x12\n\ntranscript\x18\x01 \x01(\t\x12\x17\n\nconfidence\x18\x02 \x01(\x02\x42\x03\xe0\x41\x03\x12\x46\n\x05words\x18\x03 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1p3beta1.WordInfoB\x03\xe0\x41\x03"\xa7\x01\n\x08WordInfo\x12-\n\nstart_time\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12+\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x0c\n\x04word\x18\x03 \x01(\t\x12\x17\n\nconfidence\x18\x04 \x01(\x02\x42\x03\xe0\x41\x03\x12\x18\n\x0bspeaker_tag\x18\x05 \x01(\x05\x42\x03\xe0\x41\x03"(\n\x10NormalizedVertex\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02"f\n\x16NormalizedBoundingPoly\x12L\n\x08vertices\x18\x01 \x03(\x0b\x32:.google.cloud.videointelligence.v1p3beta1.NormalizedVertex"\xaf\x01\n\x0bTextSegment\x12G\n\x07segment\x18\x01 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12\x43\n\x06\x66rames\x18\x03 \x03(\x0b\x32\x33.google.cloud.videointelligence.v1p3beta1.TextFrame"\x9b\x01\n\tTextFrame\x12^\n\x14rotated_bounding_box\x18\x01 \x01(\x0b\x32@.google.cloud.videointelligence.v1p3beta1.NormalizedBoundingPoly\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"g\n\x0eTextAnnotation\x12\x0c\n\x04text\x18\x01 \x01(\t\x12G\n\x08segments\x18\x02 \x03(\x0b\x32\x35.google.cloud.videointelligence.v1p3beta1.TextSegment"\xa7\x01\n\x13ObjectTrackingFrame\x12`\n\x17normalized_bounding_box\x18\x01 \x01(\x0b\x32?.google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"\xac\x02\n\x18ObjectTrackingAnnotation\x12@\n\x06\x65ntity\x18\x01 \x01(\x0b\x32\x30.google.cloud.videointelligence.v1p3beta1.Entity\x12\x12\n\nconfidence\x18\x04 \x01(\x02\x12M\n\x06\x66rames\x18\x02 \x03(\x0b\x32=.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame\x12I\n\x07segment\x18\x03 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegmentH\x00\x12\x12\n\x08track_id\x18\x05 \x01(\x03H\x00\x42\x0c\n\ntrack_info"\xe8\x01\n\x19LogoRecognitionAnnotation\x12@\n\x06\x65ntity\x18\x01 \x01(\x0b\x32\x30.google.cloud.videointelligence.v1p3beta1.Entity\x12?\n\x06tracks\x18\x02 \x03(\x0b\x32/.google.cloud.videointelligence.v1p3beta1.Track\x12H\n\x08segments\x18\x03 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment"\xa5\x01\n\x1dStreamingAnnotateVideoRequest\x12V\n\x0cvideo_config\x18\x01 \x01(\x0b\x32>.google.cloud.videointelligence.v1p3beta1.StreamingVideoConfigH\x00\x12\x17\n\rinput_content\x18\x02 \x01(\x0cH\x00\x42\x13\n\x11streaming_request"\xca\x01\n\x1eStreamingAnnotateVideoResponse\x12!\n\x05\x65rror\x18\x01 
\x01(\x0b\x32\x12.google.rpc.Status\x12\x65\n\x12\x61nnotation_results\x18\x02 \x01(\x0b\x32I.google.cloud.videointelligence.v1p3beta1.StreamingVideoAnnotationResults\x12\x1e\n\x16\x61nnotation_results_uri\x18\x03 \x01(\t"9\n#StreamingAutomlClassificationConfig\x12\x12\n\nmodel_name\x18\x01 \x01(\t"9\n#StreamingAutomlObjectTrackingConfig\x12\x12\n\nmodel_name\x18\x01 \x01(\t")\n\'StreamingExplicitContentDetectionConfig":\n\x1dStreamingLabelDetectionConfig\x12\x19\n\x11stationary_camera\x18\x01 \x01(\x08"\x1f\n\x1dStreamingObjectTrackingConfig"$\n"StreamingShotChangeDetectionConfig"o\n\x16StreamingStorageConfig\x12(\n enable_storage_annotation_result\x18\x01 \x01(\x08\x12+\n#annotation_result_storage_directory\x18\x03 \x01(\t"\x8b\x03\n\x1fStreamingVideoAnnotationResults\x12P\n\x10shot_annotations\x18\x01 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12T\n\x11label_annotations\x18\x02 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p3beta1.LabelAnnotation\x12`\n\x13\x65xplicit_annotation\x18\x03 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation\x12^\n\x12object_annotations\x18\x04 \x03(\x0b\x32\x42.google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation"\x8c\x07\n\x14StreamingVideoConfig\x12K\n\x07\x66\x65\x61ture\x18\x01 \x01(\x0e\x32:.google.cloud.videointelligence.v1p3beta1.StreamingFeature\x12t\n\x1cshot_change_detection_config\x18\x02 \x01(\x0b\x32L.google.cloud.videointelligence.v1p3beta1.StreamingShotChangeDetectionConfigH\x00\x12i\n\x16label_detection_config\x18\x03 \x01(\x0b\x32G.google.cloud.videointelligence.v1p3beta1.StreamingLabelDetectionConfigH\x00\x12~\n!explicit_content_detection_config\x18\x04 \x01(\x0b\x32Q.google.cloud.videointelligence.v1p3beta1.StreamingExplicitContentDetectionConfigH\x00\x12i\n\x16object_tracking_config\x18\x05 \x01(\x0b\x32G.google.cloud.videointelligence.v1p3beta1.StreamingObjectTrackingConfigH\x00\x12u\n\x1c\x61utoml_classification_config\x18\x15 \x01(\x0b\x32M.google.cloud.videointelligence.v1p3beta1.StreamingAutomlClassificationConfigH\x00\x12v\n\x1d\x61utoml_object_tracking_config\x18\x16 \x01(\x0b\x32M.google.cloud.videointelligence.v1p3beta1.StreamingAutomlObjectTrackingConfigH\x00\x12X\n\x0estorage_config\x18\x1e \x01(\x0b\x32@.google.cloud.videointelligence.v1p3beta1.StreamingStorageConfigB\x12\n\x10streaming_config*\xe6\x01\n\x07\x46\x65\x61ture\x12\x17\n\x13\x46\x45\x41TURE_UNSPECIFIED\x10\x00\x12\x13\n\x0fLABEL_DETECTION\x10\x01\x12\x19\n\x15SHOT_CHANGE_DETECTION\x10\x02\x12\x1e\n\x1a\x45XPLICIT_CONTENT_DETECTION\x10\x03\x12\x18\n\x14SPEECH_TRANSCRIPTION\x10\x06\x12\x12\n\x0eTEXT_DETECTION\x10\x07\x12\x13\n\x0fOBJECT_TRACKING\x10\t\x12\x14\n\x10LOGO_RECOGNITION\x10\x0c\x12\x19\n\x15\x43\x45LEBRITY_RECOGNITION\x10\r*r\n\x12LabelDetectionMode\x12$\n LABEL_DETECTION_MODE_UNSPECIFIED\x10\x00\x12\r\n\tSHOT_MODE\x10\x01\x12\x0e\n\nFRAME_MODE\x10\x02\x12\x17\n\x13SHOT_AND_FRAME_MODE\x10\x03*t\n\nLikelihood\x12\x1a\n\x16LIKELIHOOD_UNSPECIFIED\x10\x00\x12\x11\n\rVERY_UNLIKELY\x10\x01\x12\x0c\n\x08UNLIKELY\x10\x02\x12\x0c\n\x08POSSIBLE\x10\x03\x12\n\n\x06LIKELY\x10\x04\x12\x0f\n\x0bVERY_LIKELY\x10\x05*\x8d\x02\n\x10StreamingFeature\x12!\n\x1dSTREAMING_FEATURE_UNSPECIFIED\x10\x00\x12\x1d\n\x19STREAMING_LABEL_DETECTION\x10\x01\x12#\n\x1fSTREAMING_SHOT_CHANGE_DETECTION\x10\x02\x12(\n$STREAMING_EXPLICIT_CONTENT_DETECTION\x10\x03\x12\x1d\n\x19STREAMING_OBJECT_TRACKING\x10\x04\x12#\n\x1fSTREAMING_AUTOML_CLASSIFICATION\x10\x15\x12$\n 
STREAMING_AUTOML_OBJECT_TRACKING\x10\x16\x32\xce\x02\n\x18VideoIntelligenceService\x12\xdb\x01\n\rAnnotateVideo\x12>.google.cloud.videointelligence.v1p3beta1.AnnotateVideoRequest\x1a\x1d.google.longrunning.Operation"k\x82\xd3\xe4\x93\x02\x1f"\x1a/v1p3beta1/videos:annotate:\x01*\xda\x41\x12input_uri,features\xca\x41.\n\x15\x41nnotateVideoResponse\x12\x15\x41nnotateVideoProgress\x1aT\xca\x41 videointelligence.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platform2\xad\x02\n!StreamingVideoIntelligenceService\x12\xb1\x01\n\x16StreamingAnnotateVideo\x12G.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoRequest\x1aH.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoResponse"\x00(\x01\x30\x01\x1aT\xca\x41 videointelligence.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\x80\x02\n,com.google.cloud.videointelligence.v1p3beta1B\x1dVideoIntelligenceServiceProtoP\x01ZYgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1p3beta1;videointelligence\xaa\x02(Google.Cloud.VideoIntelligence.V1P3Beta1\xca\x02(Google\\Cloud\\VideoIntelligence\\V1p3beta1b\x06proto3' + '\nGgoogle/cloud/videointelligence_v1p3beta1/proto/video_intelligence.proto\x12(google.cloud.videointelligence.v1p3beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto"\x8c\x02\n\x14\x41nnotateVideoRequest\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x15\n\rinput_content\x18\x06 \x01(\x0c\x12H\n\x08\x66\x65\x61tures\x18\x02 \x03(\x0e\x32\x31.google.cloud.videointelligence.v1p3beta1.FeatureB\x03\xe0\x41\x02\x12M\n\rvideo_context\x18\x03 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoContext\x12\x17\n\noutput_uri\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12\x18\n\x0blocation_id\x18\x05 \x01(\tB\x03\xe0\x41\x01"\x80\x07\n\x0cVideoContext\x12H\n\x08segments\x18\x01 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12^\n\x16label_detection_config\x18\x02 \x01(\x0b\x32>.google.cloud.videointelligence.v1p3beta1.LabelDetectionConfig\x12i\n\x1cshot_change_detection_config\x18\x03 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p3beta1.ShotChangeDetectionConfig\x12s\n!explicit_content_detection_config\x18\x04 \x01(\x0b\x32H.google.cloud.videointelligence.v1p3beta1.ExplicitContentDetectionConfig\x12\\\n\x15\x66\x61\x63\x65_detection_config\x18\x05 \x01(\x0b\x32=.google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig\x12h\n\x1bspeech_transcription_config\x18\x06 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p3beta1.SpeechTranscriptionConfig\x12\\\n\x15text_detection_config\x18\x08 \x01(\x0b\x32=.google.cloud.videointelligence.v1p3beta1.TextDetectionConfig\x12`\n\x17person_detection_config\x18\x0b \x01(\x0b\x32?.google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig\x12^\n\x16object_tracking_config\x18\r \x01(\x0b\x32>.google.cloud.videointelligence.v1p3beta1.ObjectTrackingConfig"\xe4\x01\n\x14LabelDetectionConfig\x12Z\n\x14label_detection_mode\x18\x01 \x01(\x0e\x32<.google.cloud.videointelligence.v1p3beta1.LabelDetectionMode\x12\x19\n\x11stationary_camera\x18\x02 \x01(\x08\x12\r\n\x05model\x18\x03 \x01(\t\x12"\n\x1a\x66rame_confidence_threshold\x18\x04 \x01(\x02\x12"\n\x1avideo_confidence_threshold\x18\x05 \x01(\x02"*\n\x19ShotChangeDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"%\n\x14ObjectTrackingConfig\x12\r\n\x05model\x18\x01 
\x01(\t"/\n\x1e\x45xplicitContentDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"`\n\x13\x46\x61\x63\x65\x44\x65tectionConfig\x12\r\n\x05model\x18\x01 \x01(\t\x12\x1e\n\x16include_bounding_boxes\x18\x02 \x01(\x08\x12\x1a\n\x12include_attributes\x18\x05 \x01(\x08"s\n\x15PersonDetectionConfig\x12\x1e\n\x16include_bounding_boxes\x18\x01 \x01(\x08\x12\x1e\n\x16include_pose_landmarks\x18\x02 \x01(\x08\x12\x1a\n\x12include_attributes\x18\x03 \x01(\x08"<\n\x13TextDetectionConfig\x12\x16\n\x0elanguage_hints\x18\x01 \x03(\t\x12\r\n\x05model\x18\x02 \x01(\t"x\n\x0cVideoSegment\x12\x34\n\x11start_time_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x32\n\x0f\x65nd_time_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"k\n\x0cLabelSegment\x12G\n\x07segment\x18\x01 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02"P\n\nLabelFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x12\n\nconfidence\x18\x02 \x01(\x02"G\n\x06\x45ntity\x12\x11\n\tentity_id\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x15\n\rlanguage_code\x18\x03 \x01(\t"\xb0\x02\n\x0fLabelAnnotation\x12@\n\x06\x65ntity\x18\x01 \x01(\x0b\x32\x30.google.cloud.videointelligence.v1p3beta1.Entity\x12K\n\x11\x63\x61tegory_entities\x18\x02 \x03(\x0b\x32\x30.google.cloud.videointelligence.v1p3beta1.Entity\x12H\n\x08segments\x18\x03 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.LabelSegment\x12\x44\n\x06\x66rames\x18\x04 \x03(\x0b\x32\x34.google.cloud.videointelligence.v1p3beta1.LabelFrame"\x9c\x01\n\x14\x45xplicitContentFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12T\n\x16pornography_likelihood\x18\x02 \x01(\x0e\x32\x34.google.cloud.videointelligence.v1p3beta1.Likelihood"k\n\x19\x45xplicitContentAnnotation\x12N\n\x06\x66rames\x18\x01 \x03(\x0b\x32>.google.cloud.videointelligence.v1p3beta1.ExplicitContentFrame"Q\n\x15NormalizedBoundingBox\x12\x0c\n\x04left\x18\x01 \x01(\x02\x12\x0b\n\x03top\x18\x02 \x01(\x02\x12\r\n\x05right\x18\x03 \x01(\x02\x12\x0e\n\x06\x62ottom\x18\x04 \x01(\x02"\xcf\x02\n\x11TimestampedObject\x12`\n\x17normalized_bounding_box\x18\x01 \x01(\x0b\x32?.google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12T\n\nattributes\x18\x03 \x03(\x0b\x32;.google.cloud.videointelligence.v1p3beta1.DetectedAttributeB\x03\xe0\x41\x01\x12R\n\tlandmarks\x18\x04 \x03(\x0b\x32:.google.cloud.videointelligence.v1p3beta1.DetectedLandmarkB\x03\xe0\x41\x01"\x99\x02\n\x05Track\x12G\n\x07segment\x18\x01 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12X\n\x13timestamped_objects\x18\x02 \x03(\x0b\x32;.google.cloud.videointelligence.v1p3beta1.TimestampedObject\x12T\n\nattributes\x18\x03 \x03(\x0b\x32;.google.cloud.videointelligence.v1p3beta1.DetectedAttributeB\x03\xe0\x41\x01\x12\x17\n\nconfidence\x18\x04 \x01(\x02\x42\x03\xe0\x41\x01"D\n\x11\x44\x65tectedAttribute\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12\r\n\x05value\x18\x03 \x01(\t"D\n\tCelebrity\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t"\xab\x02\n\x0e\x43\x65lebrityTrack\x12\x61\n\x0b\x63\x65lebrities\x18\x01 \x03(\x0b\x32L.google.cloud.videointelligence.v1p3beta1.CelebrityTrack.RecognizedCelebrity\x12\x43\n\nface_track\x18\x03 
\x01(\x0b\x32/.google.cloud.videointelligence.v1p3beta1.Track\x1aq\n\x13RecognizedCelebrity\x12\x46\n\tcelebrity\x18\x01 \x01(\x0b\x32\x33.google.cloud.videointelligence.v1p3beta1.Celebrity\x12\x12\n\nconfidence\x18\x02 \x01(\x02"t\n\x1e\x43\x65lebrityRecognitionAnnotation\x12R\n\x10\x63\x65lebrity_tracks\x18\x01 \x03(\x0b\x32\x38.google.cloud.videointelligence.v1p3beta1.CelebrityTrack"\x7f\n\x10\x44\x65tectedLandmark\x12\x0c\n\x04name\x18\x01 \x01(\t\x12I\n\x05point\x18\x02 \x01(\x0b\x32:.google.cloud.videointelligence.v1p3beta1.NormalizedVertex\x12\x12\n\nconfidence\x18\x03 \x01(\x02"m\n\x17\x46\x61\x63\x65\x44\x65tectionAnnotation\x12?\n\x06tracks\x18\x03 \x03(\x0b\x32/.google.cloud.videointelligence.v1p3beta1.Track\x12\x11\n\tthumbnail\x18\x04 \x01(\x0c"\\\n\x19PersonDetectionAnnotation\x12?\n\x06tracks\x18\x01 \x03(\x0b\x32/.google.cloud.videointelligence.v1p3beta1.Track"\xef\x0b\n\x16VideoAnnotationResults\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12G\n\x07segment\x18\n \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12\\\n\x19segment_label_annotations\x18\x02 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p3beta1.LabelAnnotation\x12\x65\n"segment_presence_label_annotations\x18\x17 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p3beta1.LabelAnnotation\x12Y\n\x16shot_label_annotations\x18\x03 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p3beta1.LabelAnnotation\x12\x62\n\x1fshot_presence_label_annotations\x18\x18 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p3beta1.LabelAnnotation\x12Z\n\x17\x66rame_label_annotations\x18\x04 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p3beta1.LabelAnnotation\x12\x65\n\x1a\x66\x61\x63\x65_detection_annotations\x18\r \x03(\x0b\x32\x41.google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation\x12P\n\x10shot_annotations\x18\x06 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12`\n\x13\x65xplicit_annotation\x18\x07 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation\x12\\\n\x15speech_transcriptions\x18\x0b \x03(\x0b\x32=.google.cloud.videointelligence.v1p3beta1.SpeechTranscription\x12R\n\x10text_annotations\x18\x0c \x03(\x0b\x32\x38.google.cloud.videointelligence.v1p3beta1.TextAnnotation\x12^\n\x12object_annotations\x18\x0e \x03(\x0b\x32\x42.google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation\x12i\n\x1clogo_recognition_annotations\x18\x13 \x03(\x0b\x32\x43.google.cloud.videointelligence.v1p3beta1.LogoRecognitionAnnotation\x12i\n\x1cperson_detection_annotations\x18\x14 \x03(\x0b\x32\x43.google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation\x12s\n!celebrity_recognition_annotations\x18\x15 \x01(\x0b\x32H.google.cloud.videointelligence.v1p3beta1.CelebrityRecognitionAnnotation\x12!\n\x05\x65rror\x18\t \x01(\x0b\x32\x12.google.rpc.Status"u\n\x15\x41nnotateVideoResponse\x12\\\n\x12\x61nnotation_results\x18\x01 \x03(\x0b\x32@.google.cloud.videointelligence.v1p3beta1.VideoAnnotationResults"\xb4\x02\n\x17VideoAnnotationProgress\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x18\n\x10progress_percent\x18\x02 \x01(\x05\x12.\n\nstart_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x42\n\x07\x66\x65\x61ture\x18\x05 \x01(\x0e\x32\x31.google.cloud.videointelligence.v1p3beta1.Feature\x12G\n\x07segment\x18\x06 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment"w\n\x15\x41nnotateVideoProgress\x12^\n\x13\x61nnotation_progress\x18\x01 
\x03(\x0b\x32\x41.google.cloud.videointelligence.v1p3beta1.VideoAnnotationProgress"\x88\x03\n\x19SpeechTranscriptionConfig\x12\x1a\n\rlanguage_code\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x1d\n\x10max_alternatives\x18\x02 \x01(\x05\x42\x03\xe0\x41\x01\x12\x1d\n\x10\x66ilter_profanity\x18\x03 \x01(\x08\x42\x03\xe0\x41\x01\x12U\n\x0fspeech_contexts\x18\x04 \x03(\x0b\x32\x37.google.cloud.videointelligence.v1p3beta1.SpeechContextB\x03\xe0\x41\x01\x12)\n\x1c\x65nable_automatic_punctuation\x18\x05 \x01(\x08\x42\x03\xe0\x41\x01\x12\x19\n\x0c\x61udio_tracks\x18\x06 \x03(\x05\x42\x03\xe0\x41\x01\x12\'\n\x1a\x65nable_speaker_diarization\x18\x07 \x01(\x08\x42\x03\xe0\x41\x01\x12&\n\x19\x64iarization_speaker_count\x18\x08 \x01(\x05\x42\x03\xe0\x41\x01\x12#\n\x16\x65nable_word_confidence\x18\t \x01(\x08\x42\x03\xe0\x41\x01"%\n\rSpeechContext\x12\x14\n\x07phrases\x18\x01 \x03(\tB\x03\xe0\x41\x01"\x8f\x01\n\x13SpeechTranscription\x12\\\n\x0c\x61lternatives\x18\x01 \x03(\x0b\x32\x46.google.cloud.videointelligence.v1p3beta1.SpeechRecognitionAlternative\x12\x1a\n\rlanguage_code\x18\x02 \x01(\tB\x03\xe0\x41\x03"\x93\x01\n\x1cSpeechRecognitionAlternative\x12\x12\n\ntranscript\x18\x01 \x01(\t\x12\x17\n\nconfidence\x18\x02 \x01(\x02\x42\x03\xe0\x41\x03\x12\x46\n\x05words\x18\x03 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1p3beta1.WordInfoB\x03\xe0\x41\x03"\xa7\x01\n\x08WordInfo\x12-\n\nstart_time\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12+\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x0c\n\x04word\x18\x03 \x01(\t\x12\x17\n\nconfidence\x18\x04 \x01(\x02\x42\x03\xe0\x41\x03\x12\x18\n\x0bspeaker_tag\x18\x05 \x01(\x05\x42\x03\xe0\x41\x03"(\n\x10NormalizedVertex\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02"f\n\x16NormalizedBoundingPoly\x12L\n\x08vertices\x18\x01 \x03(\x0b\x32:.google.cloud.videointelligence.v1p3beta1.NormalizedVertex"\xaf\x01\n\x0bTextSegment\x12G\n\x07segment\x18\x01 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12\x43\n\x06\x66rames\x18\x03 \x03(\x0b\x32\x33.google.cloud.videointelligence.v1p3beta1.TextFrame"\x9b\x01\n\tTextFrame\x12^\n\x14rotated_bounding_box\x18\x01 \x01(\x0b\x32@.google.cloud.videointelligence.v1p3beta1.NormalizedBoundingPoly\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"g\n\x0eTextAnnotation\x12\x0c\n\x04text\x18\x01 \x01(\t\x12G\n\x08segments\x18\x02 \x03(\x0b\x32\x35.google.cloud.videointelligence.v1p3beta1.TextSegment"\xa7\x01\n\x13ObjectTrackingFrame\x12`\n\x17normalized_bounding_box\x18\x01 \x01(\x0b\x32?.google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"\xac\x02\n\x18ObjectTrackingAnnotation\x12@\n\x06\x65ntity\x18\x01 \x01(\x0b\x32\x30.google.cloud.videointelligence.v1p3beta1.Entity\x12\x12\n\nconfidence\x18\x04 \x01(\x02\x12M\n\x06\x66rames\x18\x02 \x03(\x0b\x32=.google.cloud.videointelligence.v1p3beta1.ObjectTrackingFrame\x12I\n\x07segment\x18\x03 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegmentH\x00\x12\x12\n\x08track_id\x18\x05 \x01(\x03H\x00\x42\x0c\n\ntrack_info"\xe8\x01\n\x19LogoRecognitionAnnotation\x12@\n\x06\x65ntity\x18\x01 \x01(\x0b\x32\x30.google.cloud.videointelligence.v1p3beta1.Entity\x12?\n\x06tracks\x18\x02 \x03(\x0b\x32/.google.cloud.videointelligence.v1p3beta1.Track\x12H\n\x08segments\x18\x03 
\x03(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment"\xa5\x01\n\x1dStreamingAnnotateVideoRequest\x12V\n\x0cvideo_config\x18\x01 \x01(\x0b\x32>.google.cloud.videointelligence.v1p3beta1.StreamingVideoConfigH\x00\x12\x17\n\rinput_content\x18\x02 \x01(\x0cH\x00\x42\x13\n\x11streaming_request"\xca\x01\n\x1eStreamingAnnotateVideoResponse\x12!\n\x05\x65rror\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12\x65\n\x12\x61nnotation_results\x18\x02 \x01(\x0b\x32I.google.cloud.videointelligence.v1p3beta1.StreamingVideoAnnotationResults\x12\x1e\n\x16\x61nnotation_results_uri\x18\x03 \x01(\t"9\n#StreamingAutomlClassificationConfig\x12\x12\n\nmodel_name\x18\x01 \x01(\t"9\n#StreamingAutomlObjectTrackingConfig\x12\x12\n\nmodel_name\x18\x01 \x01(\t")\n\'StreamingExplicitContentDetectionConfig":\n\x1dStreamingLabelDetectionConfig\x12\x19\n\x11stationary_camera\x18\x01 \x01(\x08"\x1f\n\x1dStreamingObjectTrackingConfig"$\n"StreamingShotChangeDetectionConfig"o\n\x16StreamingStorageConfig\x12(\n enable_storage_annotation_result\x18\x01 \x01(\x08\x12+\n#annotation_result_storage_directory\x18\x03 \x01(\t"\x8b\x03\n\x1fStreamingVideoAnnotationResults\x12P\n\x10shot_annotations\x18\x01 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1p3beta1.VideoSegment\x12T\n\x11label_annotations\x18\x02 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1p3beta1.LabelAnnotation\x12`\n\x13\x65xplicit_annotation\x18\x03 \x01(\x0b\x32\x43.google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation\x12^\n\x12object_annotations\x18\x04 \x03(\x0b\x32\x42.google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation"\x8c\x07\n\x14StreamingVideoConfig\x12K\n\x07\x66\x65\x61ture\x18\x01 \x01(\x0e\x32:.google.cloud.videointelligence.v1p3beta1.StreamingFeature\x12t\n\x1cshot_change_detection_config\x18\x02 \x01(\x0b\x32L.google.cloud.videointelligence.v1p3beta1.StreamingShotChangeDetectionConfigH\x00\x12i\n\x16label_detection_config\x18\x03 \x01(\x0b\x32G.google.cloud.videointelligence.v1p3beta1.StreamingLabelDetectionConfigH\x00\x12~\n!explicit_content_detection_config\x18\x04 \x01(\x0b\x32Q.google.cloud.videointelligence.v1p3beta1.StreamingExplicitContentDetectionConfigH\x00\x12i\n\x16object_tracking_config\x18\x05 \x01(\x0b\x32G.google.cloud.videointelligence.v1p3beta1.StreamingObjectTrackingConfigH\x00\x12u\n\x1c\x61utoml_classification_config\x18\x15 \x01(\x0b\x32M.google.cloud.videointelligence.v1p3beta1.StreamingAutomlClassificationConfigH\x00\x12v\n\x1d\x61utoml_object_tracking_config\x18\x16 \x01(\x0b\x32M.google.cloud.videointelligence.v1p3beta1.StreamingAutomlObjectTrackingConfigH\x00\x12X\n\x0estorage_config\x18\x1e \x01(\x0b\x32@.google.cloud.videointelligence.v1p3beta1.StreamingStorageConfigB\x12\n\x10streaming_config*\x90\x02\n\x07\x46\x65\x61ture\x12\x17\n\x13\x46\x45\x41TURE_UNSPECIFIED\x10\x00\x12\x13\n\x0fLABEL_DETECTION\x10\x01\x12\x19\n\x15SHOT_CHANGE_DETECTION\x10\x02\x12\x1e\n\x1a\x45XPLICIT_CONTENT_DETECTION\x10\x03\x12\x12\n\x0e\x46\x41\x43\x45_DETECTION\x10\x04\x12\x18\n\x14SPEECH_TRANSCRIPTION\x10\x06\x12\x12\n\x0eTEXT_DETECTION\x10\x07\x12\x13\n\x0fOBJECT_TRACKING\x10\t\x12\x14\n\x10LOGO_RECOGNITION\x10\x0c\x12\x19\n\x15\x43\x45LEBRITY_RECOGNITION\x10\r\x12\x14\n\x10PERSON_DETECTION\x10\x0e*r\n\x12LabelDetectionMode\x12$\n 
LABEL_DETECTION_MODE_UNSPECIFIED\x10\x00\x12\r\n\tSHOT_MODE\x10\x01\x12\x0e\n\nFRAME_MODE\x10\x02\x12\x17\n\x13SHOT_AND_FRAME_MODE\x10\x03*t\n\nLikelihood\x12\x1a\n\x16LIKELIHOOD_UNSPECIFIED\x10\x00\x12\x11\n\rVERY_UNLIKELY\x10\x01\x12\x0c\n\x08UNLIKELY\x10\x02\x12\x0c\n\x08POSSIBLE\x10\x03\x12\n\n\x06LIKELY\x10\x04\x12\x0f\n\x0bVERY_LIKELY\x10\x05*\x8d\x02\n\x10StreamingFeature\x12!\n\x1dSTREAMING_FEATURE_UNSPECIFIED\x10\x00\x12\x1d\n\x19STREAMING_LABEL_DETECTION\x10\x01\x12#\n\x1fSTREAMING_SHOT_CHANGE_DETECTION\x10\x02\x12(\n$STREAMING_EXPLICIT_CONTENT_DETECTION\x10\x03\x12\x1d\n\x19STREAMING_OBJECT_TRACKING\x10\x04\x12#\n\x1fSTREAMING_AUTOML_CLASSIFICATION\x10\x15\x12$\n STREAMING_AUTOML_OBJECT_TRACKING\x10\x16\x32\xce\x02\n\x18VideoIntelligenceService\x12\xdb\x01\n\rAnnotateVideo\x12>.google.cloud.videointelligence.v1p3beta1.AnnotateVideoRequest\x1a\x1d.google.longrunning.Operation"k\x82\xd3\xe4\x93\x02\x1f"\x1a/v1p3beta1/videos:annotate:\x01*\xda\x41\x12input_uri,features\xca\x41.\n\x15\x41nnotateVideoResponse\x12\x15\x41nnotateVideoProgress\x1aT\xca\x41 videointelligence.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platform2\xad\x02\n!StreamingVideoIntelligenceService\x12\xb1\x01\n\x16StreamingAnnotateVideo\x12G.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoRequest\x1aH.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoResponse"\x00(\x01\x30\x01\x1aT\xca\x41 videointelligence.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\x80\x02\n,com.google.cloud.videointelligence.v1p3beta1B\x1dVideoIntelligenceServiceProtoP\x01ZYgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1p3beta1;videointelligence\xaa\x02(Google.Cloud.VideoIntelligence.V1P3Beta1\xca\x02(Google\\Cloud\\VideoIntelligence\\V1p3beta1b\x06proto3' ), dependencies=[ google_dot_api_dot_annotations__pb2.DESCRIPTOR, @@ -82,42 +82,52 @@ serialized_options=None, type=None, ), + _descriptor.EnumValueDescriptor( + name="FACE_DETECTION", index=4, number=4, serialized_options=None, type=None + ), _descriptor.EnumValueDescriptor( name="SPEECH_TRANSCRIPTION", - index=4, + index=5, number=6, serialized_options=None, type=None, ), _descriptor.EnumValueDescriptor( - name="TEXT_DETECTION", index=5, number=7, serialized_options=None, type=None + name="TEXT_DETECTION", index=6, number=7, serialized_options=None, type=None ), _descriptor.EnumValueDescriptor( name="OBJECT_TRACKING", - index=6, + index=7, number=9, serialized_options=None, type=None, ), _descriptor.EnumValueDescriptor( name="LOGO_RECOGNITION", - index=7, + index=8, number=12, serialized_options=None, type=None, ), _descriptor.EnumValueDescriptor( name="CELEBRITY_RECOGNITION", - index=8, + index=9, number=13, serialized_options=None, type=None, ), + _descriptor.EnumValueDescriptor( + name="PERSON_DETECTION", + index=10, + number=14, + serialized_options=None, + type=None, + ), ], containing_type=None, serialized_options=None, - serialized_start=10023, - serialized_end=10253, + serialized_start=11058, + serialized_end=11330, ) _sym_db.RegisterEnumDescriptor(_FEATURE) @@ -151,8 +161,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=10255, - serialized_end=10369, + serialized_start=11332, + serialized_end=11446, ) _sym_db.RegisterEnumDescriptor(_LABELDETECTIONMODE) @@ -188,8 +198,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=10371, - serialized_end=10487, + serialized_start=11448, + serialized_end=11564, ) _sym_db.RegisterEnumDescriptor(_LIKELIHOOD) @@ 
-252,8 +262,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=10490, - serialized_end=10759, + serialized_start=11567, + serialized_end=11836, ) _sym_db.RegisterEnumDescriptor(_STREAMINGFEATURE) @@ -262,11 +272,13 @@ LABEL_DETECTION = 1 SHOT_CHANGE_DETECTION = 2 EXPLICIT_CONTENT_DETECTION = 3 +FACE_DETECTION = 4 SPEECH_TRANSCRIPTION = 6 TEXT_DETECTION = 7 OBJECT_TRACKING = 9 LOGO_RECOGNITION = 12 CELEBRITY_RECOGNITION = 13 +PERSON_DETECTION = 14 LABEL_DETECTION_MODE_UNSPECIFIED = 0 SHOT_MODE = 1 FRAME_MODE = 2 @@ -494,10 +506,28 @@ serialized_options=None, file=DESCRIPTOR, ), + _descriptor.FieldDescriptor( + name="face_detection_config", + full_name="google.cloud.videointelligence.v1p3beta1.VideoContext.face_detection_config", + index=4, + number=5, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), _descriptor.FieldDescriptor( name="speech_transcription_config", full_name="google.cloud.videointelligence.v1p3beta1.VideoContext.speech_transcription_config", - index=4, + index=5, number=6, type=11, cpp_type=10, @@ -515,7 +545,7 @@ _descriptor.FieldDescriptor( name="text_detection_config", full_name="google.cloud.videointelligence.v1p3beta1.VideoContext.text_detection_config", - index=5, + index=6, number=8, type=11, cpp_type=10, @@ -530,10 +560,28 @@ serialized_options=None, file=DESCRIPTOR, ), + _descriptor.FieldDescriptor( + name="person_detection_config", + full_name="google.cloud.videointelligence.v1p3beta1.VideoContext.person_detection_config", + index=7, + number=11, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), _descriptor.FieldDescriptor( name="object_tracking_config", full_name="google.cloud.videointelligence.v1p3beta1.VideoContext.object_tracking_config", - index=6, + index=8, number=13, type=11, cpp_type=10, @@ -558,7 +606,7 @@ extension_ranges=[], oneofs=[], serialized_start=604, - serialized_end=1308, + serialized_end=1500, ) @@ -668,8 +716,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1311, - serialized_end=1539, + serialized_start=1503, + serialized_end=1731, ) @@ -707,8 +755,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1541, - serialized_end=1583, + serialized_start=1733, + serialized_end=1775, ) @@ -746,8 +794,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1585, - serialized_end=1622, + serialized_start=1777, + serialized_end=1814, ) @@ -785,8 +833,158 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1624, - serialized_end=1671, + serialized_start=1816, + serialized_end=1863, +) + + +_FACEDETECTIONCONFIG = _descriptor.Descriptor( + name="FaceDetectionConfig", + full_name="google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="model", + full_name="google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig.model", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + 
serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="include_bounding_boxes", + full_name="google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig.include_bounding_boxes", + index=1, + number=2, + type=8, + cpp_type=7, + label=1, + has_default_value=False, + default_value=False, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="include_attributes", + full_name="google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig.include_attributes", + index=2, + number=5, + type=8, + cpp_type=7, + label=1, + has_default_value=False, + default_value=False, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1865, + serialized_end=1961, +) + + +_PERSONDETECTIONCONFIG = _descriptor.Descriptor( + name="PersonDetectionConfig", + full_name="google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="include_bounding_boxes", + full_name="google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig.include_bounding_boxes", + index=0, + number=1, + type=8, + cpp_type=7, + label=1, + has_default_value=False, + default_value=False, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="include_pose_landmarks", + full_name="google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig.include_pose_landmarks", + index=1, + number=2, + type=8, + cpp_type=7, + label=1, + has_default_value=False, + default_value=False, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="include_attributes", + full_name="google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig.include_attributes", + index=2, + number=3, + type=8, + cpp_type=7, + label=1, + has_default_value=False, + default_value=False, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1963, + serialized_end=2078, ) @@ -842,8 +1040,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1673, - serialized_end=1733, + serialized_start=2080, + serialized_end=2140, ) @@ -899,8 +1097,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1735, - serialized_end=1855, + serialized_start=2142, + serialized_end=2262, ) @@ -956,8 +1154,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1857, - serialized_end=1964, + serialized_start=2264, + serialized_end=2371, ) @@ -1013,8 +1211,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1966, - serialized_end=2046, + serialized_start=2373, + serialized_end=2453, ) @@ -1088,8 +1286,8 @@ 
syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2048, - serialized_end=2119, + serialized_start=2455, + serialized_end=2526, ) @@ -1181,8 +1379,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2122, - serialized_end=2426, + serialized_start=2529, + serialized_end=2833, ) @@ -1238,8 +1436,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2429, - serialized_end=2585, + serialized_start=2836, + serialized_end=2992, ) @@ -1277,8 +1475,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2587, - serialized_end=2694, + serialized_start=2994, + serialized_end=3101, ) @@ -1370,8 +1568,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2696, - serialized_end=2777, + serialized_start=3103, + serialized_end=3184, ) @@ -1436,6 +1634,24 @@ serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), + _descriptor.FieldDescriptor( + name="landmarks", + full_name="google.cloud.videointelligence.v1p3beta1.TimestampedObject.landmarks", + index=3, + number=4, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=_b("\340A\001"), + file=DESCRIPTOR, + ), ], extensions=[], nested_types=[], @@ -1445,8 +1661,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2780, - serialized_end=3031, + serialized_start=3187, + serialized_end=3522, ) @@ -1538,8 +1754,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3034, - serialized_end=3315, + serialized_start=3525, + serialized_end=3806, ) @@ -1613,8 +1829,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3317, - serialized_end=3385, + serialized_start=3808, + serialized_end=3876, ) @@ -1688,8 +1904,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3387, - serialized_end=3455, + serialized_start=3878, + serialized_end=3946, ) @@ -1745,8 +1961,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3644, - serialized_end=3757, + serialized_start=4135, + serialized_end=4248, ) _CELEBRITYTRACK = _descriptor.Descriptor( @@ -1801,8 +2017,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3458, - serialized_end=3757, + serialized_start=3949, + serialized_end=4248, ) @@ -1840,8 +2056,179 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3759, - serialized_end=3875, + serialized_start=4250, + serialized_end=4366, +) + + +_DETECTEDLANDMARK = _descriptor.Descriptor( + name="DetectedLandmark", + full_name="google.cloud.videointelligence.v1p3beta1.DetectedLandmark", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.cloud.videointelligence.v1p3beta1.DetectedLandmark.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="point", + full_name="google.cloud.videointelligence.v1p3beta1.DetectedLandmark.point", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + 
serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="confidence", + full_name="google.cloud.videointelligence.v1p3beta1.DetectedLandmark.confidence", + index=2, + number=3, + type=2, + cpp_type=6, + label=1, + has_default_value=False, + default_value=float(0), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=4368, + serialized_end=4495, +) + + +_FACEDETECTIONANNOTATION = _descriptor.Descriptor( + name="FaceDetectionAnnotation", + full_name="google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="tracks", + full_name="google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation.tracks", + index=0, + number=3, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="thumbnail", + full_name="google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation.thumbnail", + index=1, + number=4, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b(""), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=4497, + serialized_end=4606, +) + + +_PERSONDETECTIONANNOTATION = _descriptor.Descriptor( + name="PersonDetectionAnnotation", + full_name="google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="tracks", + full_name="google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation.tracks", + index=0, + number=1, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ) + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=4608, + serialized_end=4700, ) @@ -1978,10 +2365,28 @@ serialized_options=None, file=DESCRIPTOR, ), + _descriptor.FieldDescriptor( + name="face_detection_annotations", + full_name="google.cloud.videointelligence.v1p3beta1.VideoAnnotationResults.face_detection_annotations", + index=7, + number=13, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), _descriptor.FieldDescriptor( name="shot_annotations", full_name="google.cloud.videointelligence.v1p3beta1.VideoAnnotationResults.shot_annotations", - index=7, + index=8, number=6, type=11, cpp_type=10, @@ -1999,7 +2404,7 @@ 
_descriptor.FieldDescriptor( name="explicit_annotation", full_name="google.cloud.videointelligence.v1p3beta1.VideoAnnotationResults.explicit_annotation", - index=8, + index=9, number=7, type=11, cpp_type=10, @@ -2017,7 +2422,7 @@ _descriptor.FieldDescriptor( name="speech_transcriptions", full_name="google.cloud.videointelligence.v1p3beta1.VideoAnnotationResults.speech_transcriptions", - index=9, + index=10, number=11, type=11, cpp_type=10, @@ -2035,7 +2440,7 @@ _descriptor.FieldDescriptor( name="text_annotations", full_name="google.cloud.videointelligence.v1p3beta1.VideoAnnotationResults.text_annotations", - index=10, + index=11, number=12, type=11, cpp_type=10, @@ -2053,7 +2458,7 @@ _descriptor.FieldDescriptor( name="object_annotations", full_name="google.cloud.videointelligence.v1p3beta1.VideoAnnotationResults.object_annotations", - index=11, + index=12, number=14, type=11, cpp_type=10, @@ -2071,7 +2476,7 @@ _descriptor.FieldDescriptor( name="logo_recognition_annotations", full_name="google.cloud.videointelligence.v1p3beta1.VideoAnnotationResults.logo_recognition_annotations", - index=12, + index=13, number=19, type=11, cpp_type=10, @@ -2086,10 +2491,28 @@ serialized_options=None, file=DESCRIPTOR, ), + _descriptor.FieldDescriptor( + name="person_detection_annotations", + full_name="google.cloud.videointelligence.v1p3beta1.VideoAnnotationResults.person_detection_annotations", + index=14, + number=20, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), _descriptor.FieldDescriptor( name="celebrity_recognition_annotations", full_name="google.cloud.videointelligence.v1p3beta1.VideoAnnotationResults.celebrity_recognition_annotations", - index=13, + index=15, number=21, type=11, cpp_type=10, @@ -2107,7 +2530,7 @@ _descriptor.FieldDescriptor( name="error", full_name="google.cloud.videointelligence.v1p3beta1.VideoAnnotationResults.error", - index=14, + index=16, number=9, type=11, cpp_type=10, @@ -2131,8 +2554,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3878, - serialized_end=5187, + serialized_start=4703, + serialized_end=6222, ) @@ -2170,8 +2593,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=5189, - serialized_end=5306, + serialized_start=6224, + serialized_end=6341, ) @@ -2299,8 +2722,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=5309, - serialized_end=5617, + serialized_start=6344, + serialized_end=6652, ) @@ -2338,8 +2761,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=5619, - serialized_end=5738, + serialized_start=6654, + serialized_end=6773, ) @@ -2521,8 +2944,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=5741, - serialized_end=6133, + serialized_start=6776, + serialized_end=7168, ) @@ -2560,8 +2983,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=6135, - serialized_end=6172, + serialized_start=7170, + serialized_end=7207, ) @@ -2617,8 +3040,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=6175, - serialized_end=6318, + serialized_start=7210, + serialized_end=7353, ) @@ -2692,8 +3115,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=6321, - serialized_end=6468, + serialized_start=7356, + serialized_end=7503, ) @@ -2803,8 +3226,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - 
serialized_start=6471, - serialized_end=6638, + serialized_start=7506, + serialized_end=7673, ) @@ -2860,8 +3283,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=6640, - serialized_end=6680, + serialized_start=7675, + serialized_end=7715, ) @@ -2899,8 +3322,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=6682, - serialized_end=6784, + serialized_start=7717, + serialized_end=7819, ) @@ -2974,8 +3397,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=6787, - serialized_end=6962, + serialized_start=7822, + serialized_end=7997, ) @@ -3031,8 +3454,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=6965, - serialized_end=7120, + serialized_start=8000, + serialized_end=8155, ) @@ -3088,8 +3511,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=7122, - serialized_end=7225, + serialized_start=8157, + serialized_end=8260, ) @@ -3145,8 +3568,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=7228, - serialized_end=7395, + serialized_start=8263, + serialized_end=8430, ) @@ -3264,8 +3687,8 @@ fields=[], ) ], - serialized_start=7398, - serialized_end=7698, + serialized_start=8433, + serialized_end=8733, ) @@ -3339,8 +3762,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=7701, - serialized_end=7933, + serialized_start=8736, + serialized_end=8968, ) @@ -3404,8 +3827,8 @@ fields=[], ) ], - serialized_start=7936, - serialized_end=8101, + serialized_start=8971, + serialized_end=9136, ) @@ -3479,8 +3902,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=8104, - serialized_end=8306, + serialized_start=9139, + serialized_end=9341, ) @@ -3518,8 +3941,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=8308, - serialized_end=8365, + serialized_start=9343, + serialized_end=9400, ) @@ -3557,8 +3980,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=8367, - serialized_end=8424, + serialized_start=9402, + serialized_end=9459, ) @@ -3577,8 +4000,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=8426, - serialized_end=8467, + serialized_start=9461, + serialized_end=9502, ) @@ -3616,8 +4039,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=8469, - serialized_end=8527, + serialized_start=9504, + serialized_end=9562, ) @@ -3636,8 +4059,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=8529, - serialized_end=8560, + serialized_start=9564, + serialized_end=9595, ) @@ -3656,8 +4079,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=8562, - serialized_end=8598, + serialized_start=9597, + serialized_end=9633, ) @@ -3713,8 +4136,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=8600, - serialized_end=8711, + serialized_start=9635, + serialized_end=9746, ) @@ -3806,8 +4229,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=8714, - serialized_end=9109, + serialized_start=9749, + serialized_end=10144, ) @@ -3979,8 +4402,8 @@ fields=[], ) ], - serialized_start=9112, - serialized_end=10020, + serialized_start=10147, + serialized_end=11055, ) _ANNOTATEVIDEOREQUEST.fields_by_name["features"].enum_type = _FEATURE @@ -3995,12 +4418,18 @@ _VIDEOCONTEXT.fields_by_name[ "explicit_content_detection_config" ].message_type = _EXPLICITCONTENTDETECTIONCONFIG +_VIDEOCONTEXT.fields_by_name[ + "face_detection_config" +].message_type = _FACEDETECTIONCONFIG _VIDEOCONTEXT.fields_by_name[ 
"speech_transcription_config" ].message_type = _SPEECHTRANSCRIPTIONCONFIG _VIDEOCONTEXT.fields_by_name[ "text_detection_config" ].message_type = _TEXTDETECTIONCONFIG +_VIDEOCONTEXT.fields_by_name[ + "person_detection_config" +].message_type = _PERSONDETECTIONCONFIG _VIDEOCONTEXT.fields_by_name[ "object_tracking_config" ].message_type = _OBJECTTRACKINGCONFIG @@ -4033,6 +4462,7 @@ "time_offset" ].message_type = google_dot_protobuf_dot_duration__pb2._DURATION _TIMESTAMPEDOBJECT.fields_by_name["attributes"].message_type = _DETECTEDATTRIBUTE +_TIMESTAMPEDOBJECT.fields_by_name["landmarks"].message_type = _DETECTEDLANDMARK _TRACK.fields_by_name["segment"].message_type = _VIDEOSEGMENT _TRACK.fields_by_name["timestamped_objects"].message_type = _TIMESTAMPEDOBJECT _TRACK.fields_by_name["attributes"].message_type = _DETECTEDATTRIBUTE @@ -4047,6 +4477,9 @@ _CELEBRITYRECOGNITIONANNOTATION.fields_by_name[ "celebrity_tracks" ].message_type = _CELEBRITYTRACK +_DETECTEDLANDMARK.fields_by_name["point"].message_type = _NORMALIZEDVERTEX +_FACEDETECTIONANNOTATION.fields_by_name["tracks"].message_type = _TRACK +_PERSONDETECTIONANNOTATION.fields_by_name["tracks"].message_type = _TRACK _VIDEOANNOTATIONRESULTS.fields_by_name["segment"].message_type = _VIDEOSEGMENT _VIDEOANNOTATIONRESULTS.fields_by_name[ "segment_label_annotations" @@ -4063,6 +4496,9 @@ _VIDEOANNOTATIONRESULTS.fields_by_name[ "frame_label_annotations" ].message_type = _LABELANNOTATION +_VIDEOANNOTATIONRESULTS.fields_by_name[ + "face_detection_annotations" +].message_type = _FACEDETECTIONANNOTATION _VIDEOANNOTATIONRESULTS.fields_by_name["shot_annotations"].message_type = _VIDEOSEGMENT _VIDEOANNOTATIONRESULTS.fields_by_name[ "explicit_annotation" @@ -4079,6 +4515,9 @@ _VIDEOANNOTATIONRESULTS.fields_by_name[ "logo_recognition_annotations" ].message_type = _LOGORECOGNITIONANNOTATION +_VIDEOANNOTATIONRESULTS.fields_by_name[ + "person_detection_annotations" +].message_type = _PERSONDETECTIONANNOTATION _VIDEOANNOTATIONRESULTS.fields_by_name[ "celebrity_recognition_annotations" ].message_type = _CELEBRITYRECOGNITIONANNOTATION @@ -4245,6 +4684,8 @@ DESCRIPTOR.message_types_by_name[ "ExplicitContentDetectionConfig" ] = _EXPLICITCONTENTDETECTIONCONFIG +DESCRIPTOR.message_types_by_name["FaceDetectionConfig"] = _FACEDETECTIONCONFIG +DESCRIPTOR.message_types_by_name["PersonDetectionConfig"] = _PERSONDETECTIONCONFIG DESCRIPTOR.message_types_by_name["TextDetectionConfig"] = _TEXTDETECTIONCONFIG DESCRIPTOR.message_types_by_name["VideoSegment"] = _VIDEOSEGMENT DESCRIPTOR.message_types_by_name["LabelSegment"] = _LABELSEGMENT @@ -4264,6 +4705,11 @@ DESCRIPTOR.message_types_by_name[ "CelebrityRecognitionAnnotation" ] = _CELEBRITYRECOGNITIONANNOTATION +DESCRIPTOR.message_types_by_name["DetectedLandmark"] = _DETECTEDLANDMARK +DESCRIPTOR.message_types_by_name["FaceDetectionAnnotation"] = _FACEDETECTIONANNOTATION +DESCRIPTOR.message_types_by_name[ + "PersonDetectionAnnotation" +] = _PERSONDETECTIONANNOTATION DESCRIPTOR.message_types_by_name["VideoAnnotationResults"] = _VIDEOANNOTATIONRESULTS DESCRIPTOR.message_types_by_name["AnnotateVideoResponse"] = _ANNOTATEVIDEORESPONSE DESCRIPTOR.message_types_by_name["VideoAnnotationProgress"] = _VIDEOANNOTATIONPROGRESS @@ -4393,10 +4839,14 @@ Config for SHOT\_CHANGE\_DETECTION. explicit_content_detection_config: Config for EXPLICIT\_CONTENT\_DETECTION. + face_detection_config: + Config for FACE\_DETECTION. speech_transcription_config: Config for SPEECH\_TRANSCRIPTION. text_detection_config: Config for TEXT\_DETECTION. 
+ person_detection_config: + Config for PERSON\_DETECTION. object_tracking_config: Config for OBJECT\_TRACKING. """, @@ -4506,6 +4956,60 @@ ) _sym_db.RegisterMessage(ExplicitContentDetectionConfig) +FaceDetectionConfig = _reflection.GeneratedProtocolMessageType( + "FaceDetectionConfig", + (_message.Message,), + dict( + DESCRIPTOR=_FACEDETECTIONCONFIG, + __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + __doc__="""Config for FACE\_DETECTION. + + + Attributes: + model: + Model to use for face detection. Supported values: + "builtin/stable" (the default if unset) and "builtin/latest". + include_bounding_boxes: + Whether bounding boxes be included in the face annotation + output. + include_attributes: + Whether to enable face attributes detection, such as glasses, + dark\_glasses, mouth\_open etc. Ignored if + 'include\_bounding\_boxes' is false. + """, + # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig) + ), +) +_sym_db.RegisterMessage(FaceDetectionConfig) + +PersonDetectionConfig = _reflection.GeneratedProtocolMessageType( + "PersonDetectionConfig", + (_message.Message,), + dict( + DESCRIPTOR=_PERSONDETECTIONCONFIG, + __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + __doc__="""Config for PERSON\_DETECTION. + + + Attributes: + include_bounding_boxes: + Whether bounding boxes be included in the person detection + annotation output. + include_pose_landmarks: + Whether to enable pose landmarks detection. Ignored if + 'include\_bounding\_boxes' is false. + include_attributes: + Whether to enable person attributes detection, such as cloth + color (black, blue, etc), type (coat, dress, etc), pattern + (plain, floral, etc), hair color (black, blonde, etc), hair + length (long, short, bald), etc. Ignored if + 'include\_bounding\_boxes' is false. + """, + # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig) + ), +) +_sym_db.RegisterMessage(PersonDetectionConfig) + TextDetectionConfig = _reflection.GeneratedProtocolMessageType( "TextDetectionConfig", (_message.Message,), @@ -4559,7 +5063,8 @@ dict( DESCRIPTOR=_LABELSEGMENT, __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Video segment level annotation results for label detection. + __doc__="""Video segment level annotation results for label + detection. Attributes: @@ -4672,9 +5177,9 @@ dict( DESCRIPTOR=_EXPLICITCONTENTANNOTATION, __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Explicit content annotation (based on per-frame visual signals only). If - no explicit content has been detected in a frame, no annotations are - present for that frame. + __doc__="""Explicit content annotation (based on per-frame visual + signals only). If no explicit content has been detected in a frame, no + annotations are present for that frame. Attributes: @@ -4692,8 +5197,8 @@ dict( DESCRIPTOR=_NORMALIZEDBOUNDINGBOX, __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Normalized bounding box. The normalized vertex coordinates are relative - to the original image. Range: [0, 1]. + __doc__="""Normalized bounding box. The normalized vertex coordinates + are relative to the original image. Range: [0, 1]. 
Attributes: @@ -4717,8 +5222,8 @@ dict( DESCRIPTOR=_TIMESTAMPEDOBJECT, __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""For tracking related features. An object at time\_offset with - attributes, and located with normalized\_bounding\_box. + __doc__="""For tracking related features. An object at time\_offset + with attributes, and located with normalized\_bounding\_box. Attributes: @@ -4730,6 +5235,8 @@ corresponding to the video frame for this object. attributes: Optional. The attributes of the object in the bounding box. + landmarks: + Optional. The detected landmarks. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.TimestampedObject) ), @@ -4767,7 +5274,8 @@ dict( DESCRIPTOR=_DETECTEDATTRIBUTE, __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""A generic detected attribute represented by name in string format. + __doc__="""A generic detected attribute represented by name in string + format. Attributes: @@ -4836,9 +5344,9 @@ ), DESCRIPTOR=_CELEBRITYTRACK, __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""The annotation result of a celebrity face track. RecognizedCelebrity - field could be empty if the face track does not have any matched - celebrities. + __doc__="""The annotation result of a celebrity face track. + RecognizedCelebrity field could be empty if the face track does not have + any matched celebrities. Attributes: @@ -4872,6 +5380,69 @@ ) _sym_db.RegisterMessage(CelebrityRecognitionAnnotation) +DetectedLandmark = _reflection.GeneratedProtocolMessageType( + "DetectedLandmark", + (_message.Message,), + dict( + DESCRIPTOR=_DETECTEDLANDMARK, + __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + __doc__="""A generic detected landmark represented by name in string + format and a 2D location. + + + Attributes: + name: + The name of this landmark, i.e. left\_hand, right\_shoulder. + point: + The 2D point of the detected landmark using the normalized + image coordindate system. The normalized coordinates have the + range from 0 to 1. + confidence: + The confidence score of the detected landmark. Range [0, 1]. + """, + # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.DetectedLandmark) + ), +) +_sym_db.RegisterMessage(DetectedLandmark) + +FaceDetectionAnnotation = _reflection.GeneratedProtocolMessageType( + "FaceDetectionAnnotation", + (_message.Message,), + dict( + DESCRIPTOR=_FACEDETECTIONANNOTATION, + __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + __doc__="""Face detection annotation. + + + Attributes: + tracks: + The face tracks with attributes. + thumbnail: + The thumbnail of a person's face. + """, + # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation) + ), +) +_sym_db.RegisterMessage(FaceDetectionAnnotation) + +PersonDetectionAnnotation = _reflection.GeneratedProtocolMessageType( + "PersonDetectionAnnotation", + (_message.Message,), + dict( + DESCRIPTOR=_PERSONDETECTIONANNOTATION, + __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", + __doc__="""Person detection annotation per video. + + + Attributes: + tracks: + The trackes that a person is detected. 
+ """, + # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation) + ), +) +_sym_db.RegisterMessage(PersonDetectionAnnotation) + VideoAnnotationResults = _reflection.GeneratedProtocolMessageType( "VideoAnnotationResults", (_message.Message,), @@ -4914,6 +5485,8 @@ frame_label_annotations: Label annotations on frame level. There is exactly one element for each unique label. + face_detection_annotations: + Face detection annotations. shot_annotations: Shot annotations. Each shot is represented as a video segment. explicit_annotation: @@ -4929,6 +5502,8 @@ logo_recognition_annotations: Annotations for list of logos detected, tracked and recognized in video. + person_detection_annotations: + Person detection annotations. celebrity_recognition_annotations: Celebrity recognition annotations. error: @@ -4947,8 +5522,8 @@ dict( DESCRIPTOR=_ANNOTATEVIDEORESPONSE, __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Video annotation response. Included in the ``response`` field of the - ``Operation`` returned by the ``GetOperation`` call of the + __doc__="""Video annotation response. Included in the ``response`` + field of the ``Operation`` returned by the ``GetOperation`` call of the ``google::longrunning::Operations`` service. @@ -5000,8 +5575,8 @@ dict( DESCRIPTOR=_ANNOTATEVIDEOPROGRESS, __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Video annotation progress. Included in the ``metadata`` field of the - ``Operation`` returned by the ``GetOperation`` call of the + __doc__="""Video annotation progress. Included in the ``metadata`` + field of the ``Operation`` returned by the ``GetOperation`` call of the ``google::longrunning::Operations`` service. @@ -5091,8 +5666,8 @@ dict( DESCRIPTOR=_SPEECHCONTEXT, __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Provides "hints" to the speech recognizer to favor specific words and - phrases in the results. + __doc__="""Provides "hints" to the speech recognizer to favor + specific words and phrases in the results. Attributes: @@ -5117,7 +5692,8 @@ dict( DESCRIPTOR=_SPEECHTRANSCRIPTION, __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""A speech recognition result corresponding to a portion of the audio. + __doc__="""A speech recognition result corresponding to a portion of + the audio. Attributes: @@ -5175,9 +5751,9 @@ dict( DESCRIPTOR=_WORDINFO, __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Word-specific information for recognized words. Word information is only - included in the response when certain request parameters are set, such - as ``enable_word_time_offsets``. + __doc__="""Word-specific information for recognized words. Word + information is only included in the response when certain request + parameters are set, such as ``enable_word_time_offsets``. Attributes: @@ -5239,10 +5815,11 @@ dict( DESCRIPTOR=_NORMALIZEDBOUNDINGPOLY, __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Normalized bounding polygon for text (that might not be aligned with - axis). Contains list of the corner points in clockwise order starting - from top-left corner. 
For example, for a rectangular bounding box: When - the text is horizontal it might look like: 0----1 \| \| 3----2 + __doc__="""Normalized bounding polygon for text (that might not be + aligned with axis). Contains list of the corner points in clockwise + order starting from top-left corner. For example, for a rectangular + bounding box: When the text is horizontal it might look like: 0----1 \| + \| 3----2 When it's clockwise rotated 180 degrees around the top-left corner it becomes: 2----3 \| \| 1----0 @@ -5291,9 +5868,9 @@ dict( DESCRIPTOR=_TEXTFRAME, __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Video frame level annotation results for text annotation (OCR). Contains - information regarding timestamp and bounding box locations for the - frames containing detected OCR text snippets. + __doc__="""Video frame level annotation results for text annotation + (OCR). Contains information regarding timestamp and bounding box + locations for the frames containing detected OCR text snippets. Attributes: @@ -5313,9 +5890,9 @@ dict( DESCRIPTOR=_TEXTANNOTATION, __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Annotations related to one detected OCR text snippet. This will contain - the corresponding text, confidence value, and frame level information - for each detection. + __doc__="""Annotations related to one detected OCR text snippet. This + will contain the corresponding text, confidence value, and frame level + information for each detection. Attributes: @@ -5335,8 +5912,9 @@ dict( DESCRIPTOR=_OBJECTTRACKINGFRAME, __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Video frame level annotations for object detection and tracking. This - field stores per frame location, time offset, and confidence. + __doc__="""Video frame level annotations for object detection and + tracking. This field stores per frame location, time offset, and + confidence. Attributes: @@ -5397,8 +5975,8 @@ dict( DESCRIPTOR=_LOGORECOGNITIONANNOTATION, __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Annotation corresponding to one detected, tracked and recognized logo - class. + __doc__="""Annotation corresponding to one detected, tracked and + recognized logo class. Attributes: @@ -5435,8 +6013,8 @@ Attributes: streaming_request: - *Required* The streaming request, which is either a streaming - config or video content. + \ *Required* The streaming request, which is either a + streaming config or video content. video_config: Provides information to the annotator, specifing how to process the request. The first @@ -5462,10 +6040,10 @@ dict( DESCRIPTOR=_STREAMINGANNOTATEVIDEORESPONSE, __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""``StreamingAnnotateVideoResponse`` is the only message returned to the - client by ``StreamingAnnotateVideo``. A series of zero or more - ``StreamingAnnotateVideoResponse`` messages are streamed back to the - client. + __doc__="""\ ``StreamingAnnotateVideoResponse`` is the only message + returned to the client by ``StreamingAnnotateVideo``. A series of zero + or more ``StreamingAnnotateVideoResponse`` messages are streamed back to + the client. 
Attributes: @@ -5530,6 +6108,7 @@ DESCRIPTOR=_STREAMINGEXPLICITCONTENTDETECTIONCONFIG, __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", __doc__="""Config for STREAMING\_EXPLICIT\_CONTENT\_DETECTION. + """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.StreamingExplicitContentDetectionConfig) ), @@ -5563,6 +6142,7 @@ DESCRIPTOR=_STREAMINGOBJECTTRACKINGCONFIG, __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", __doc__="""Config for STREAMING\_OBJECT\_TRACKING. + """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.StreamingObjectTrackingConfig) ), @@ -5576,6 +6156,7 @@ DESCRIPTOR=_STREAMINGSHOTCHANGEDETECTIONCONFIG, __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", __doc__="""Config for STREAMING\_SHOT\_CHANGE\_DETECTION. + """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1p3beta1.StreamingShotChangeDetectionConfig) ), @@ -5619,8 +6200,8 @@ dict( DESCRIPTOR=_STREAMINGVIDEOANNOTATIONRESULTS, __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Streaming annotation results corresponding to a portion of the video - that is currently being processed. + __doc__="""Streaming annotation results corresponding to a portion of + the video that is currently being processed. Attributes: @@ -5645,8 +6226,8 @@ dict( DESCRIPTOR=_STREAMINGVIDEOCONFIG, __module__="google.cloud.videointelligence_v1p3beta1.proto.video_intelligence_pb2", - __doc__="""Provides information to the annotator that specifies how to process the - request. + __doc__="""Provides information to the annotator that specifies how + to process the request. Attributes: @@ -5680,6 +6261,7 @@ _ANNOTATEVIDEOREQUEST.fields_by_name["output_uri"]._options = None _ANNOTATEVIDEOREQUEST.fields_by_name["location_id"]._options = None _TIMESTAMPEDOBJECT.fields_by_name["attributes"]._options = None +_TIMESTAMPEDOBJECT.fields_by_name["landmarks"]._options = None _TRACK.fields_by_name["attributes"]._options = None _TRACK.fields_by_name["confidence"]._options = None _SPEECHTRANSCRIPTIONCONFIG.fields_by_name["language_code"]._options = None @@ -5708,8 +6290,8 @@ serialized_options=_b( "\312A videointelligence.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform" ), - serialized_start=10762, - serialized_end=11096, + serialized_start=11839, + serialized_end=12173, methods=[ _descriptor.MethodDescriptor( name="AnnotateVideo", @@ -5737,8 +6319,8 @@ serialized_options=_b( "\312A videointelligence.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform" ), - serialized_start=11099, - serialized_end=11400, + serialized_start=12176, + serialized_end=12477, methods=[ _descriptor.MethodDescriptor( name="StreamingAnnotateVideo", diff --git a/google/cloud/videointelligence_v1p3beta1/types.py b/google/cloud/videointelligence_v1p3beta1/types.py index a56aaf9c..ed9b4d4c 100644 --- a/google/cloud/videointelligence_v1p3beta1/types.py +++ b/google/cloud/videointelligence_v1p3beta1/types.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/noxfile.py b/noxfile.py index 17c85f3d..fd48fd0d 100644 --- a/noxfile.py +++ b/noxfile.py @@ -72,6 +72,7 @@ def default(session): session.run( "py.test", "--quiet", + "--cov=google.cloud.videointelligence", "--cov=google.cloud", "--cov=tests.unit", "--cov-append", diff --git a/synth.metadata b/synth.metadata index a34e8a27..525f859b 100644 --- a/synth.metadata +++ b/synth.metadata @@ -1,5 +1,5 @@ { - "updateTime": "2020-02-03T22:16:32.560860Z", + "updateTime": "2020-02-05T13:22:43.739935Z", "sources": [ { "generator": { @@ -12,9 +12,9 @@ "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "29d40b78e3dc1579b0b209463fbcb76e5767f72a", - "internalRef": "292979741", - "log": "29d40b78e3dc1579b0b209463fbcb76e5767f72a\nExpose managedidentities/v1beta1/ API for client library usage.\n\nPiperOrigin-RevId: 292979741\n\na22129a1fb6e18056d576dfb7717aef74b63734a\nExpose managedidentities/v1/ API for client library usage.\n\nPiperOrigin-RevId: 292968186\n\nb5cbe4a4ba64ab19e6627573ff52057a1657773d\nSecurityCenter v1p1beta1: move file-level option on top to workaround protobuf.js bug.\n\nPiperOrigin-RevId: 292647187\n\nb224b317bf20c6a4fbc5030b4a969c3147f27ad3\nAdds API definitions for bigqueryreservation v1beta1.\n\nPiperOrigin-RevId: 292634722\n\nc1468702f9b17e20dd59007c0804a089b83197d2\nSynchronize new proto/yaml changes.\n\nPiperOrigin-RevId: 292626173\n\nffdfa4f55ab2f0afc11d0eb68f125ccbd5e404bd\nvision: v1p3beta1 publish annotations and retry config\n\nPiperOrigin-RevId: 292605599\n\n78f61482cd028fc1d9892aa5d89d768666a954cd\nvision: v1p1beta1 publish annotations and retry config\n\nPiperOrigin-RevId: 292605125\n\n60bb5a294a604fd1778c7ec87b265d13a7106171\nvision: v1p2beta1 publish annotations and retry config\n\nPiperOrigin-RevId: 292604980\n\n3bcf7aa79d45eb9ec29ab9036e9359ea325a7fc3\nvision: v1p4beta1 publish annotations and retry config\n\nPiperOrigin-RevId: 292604656\n\n2717b8a1c762b26911b45ecc2e4ee01d98401b28\nFix dataproc artman client library generation.\n\nPiperOrigin-RevId: 292555664\n\n7ac66d9be8a7d7de4f13566d8663978c9ee9dcd7\nAdd Dataproc Autoscaling API to V1.\n\nPiperOrigin-RevId: 292450564\n\n5d932b2c1be3a6ef487d094e3cf5c0673d0241dd\n- Improve documentation\n- Add a client_id field to StreamingPullRequest\n\nPiperOrigin-RevId: 292434036\n\neaff9fa8edec3e914995ce832b087039c5417ea7\nmonitoring: v3 publish annotations and client retry config\n\nPiperOrigin-RevId: 292425288\n\n70958bab8c5353870d31a23fb2c40305b050d3fe\nBigQuery Storage Read API v1 clients.\n\nPiperOrigin-RevId: 292407644\n\n7a15e7fe78ff4b6d5c9606a3264559e5bde341d1\nUpdate backend proto for Google Cloud Endpoints\n\nPiperOrigin-RevId: 292391607\n\n3ca2c014e24eb5111c8e7248b1e1eb833977c83d\nbazel: Add --flaky_test_attempts=3 argument to prevent CI failures caused by flaky tests\n\nPiperOrigin-RevId: 292382559\n\n9933347c1f677e81e19a844c2ef95bfceaf694fe\nbazel:Integrate latest protoc-java-resource-names-plugin changes (fix for PyYAML dependency in bazel rules)\n\nPiperOrigin-RevId: 292376626\n\nb835ab9d2f62c88561392aa26074c0b849fb0bd3\nasset: v1p2beta1 add client config annotations\n\n* remove unintentionally exposed RPCs\n* remove messages relevant to removed RPCs\n\nPiperOrigin-RevId: 292369593\n\nc1246a29e22b0f98e800a536b5b0da2d933a55f2\nUpdating v1 protos with the latest inline documentation (in comments) and config options. 
Also adding a per-service .yaml file.\n\nPiperOrigin-RevId: 292310790\n\nb491d07cadaae7cde5608321f913e5ca1459b32d\nRevert accidental local_repository change\n\nPiperOrigin-RevId: 292245373\n\naf3400a8cb6110025198b59a0f7d018ae3cda700\nUpdate gapic-generator dependency (prebuilt PHP binary support).\n\nPiperOrigin-RevId: 292243997\n\n341fd5690fae36f36cf626ef048fbcf4bbe7cee6\ngrafeas: v1 add resource_definition for the grafeas.io/Project and change references for Project.\n\nPiperOrigin-RevId: 292221998\n\n42e915ec2ece1cd37a590fbcd10aa2c0fb0e5b06\nUpdate the gapic-generator, protoc-java-resource-name-plugin and protoc-docs-plugin to the latest commit.\n\nPiperOrigin-RevId: 292182368\n\nf035f47250675d31492a09f4a7586cfa395520a7\nFix grafeas build and update build.sh script to include gerafeas.\n\nPiperOrigin-RevId: 292168753\n\n26ccb214b7bc4a716032a6266bcb0a9ca55d6dbb\nasset: v1p1beta1 add client config annotations and retry config\n\nPiperOrigin-RevId: 292154210\n\n974ee5c0b5d03e81a50dafcedf41e0efebb5b749\nasset: v1beta1 add client config annotations\n\nPiperOrigin-RevId: 292152573\n\ncf3b61102ed5f36b827bc82ec39be09525f018c8\n Fix to protos for v1p1beta1 release of Cloud Security Command Center\n\nPiperOrigin-RevId: 292034635\n\n4e1cfaa7c0fede9e65d64213ca3da1b1255816c0\nUpdate the public proto to support UTF-8 encoded id for CatalogService API, increase the ListCatalogItems deadline to 300s and some minor documentation change\n\nPiperOrigin-RevId: 292030970\n\n9c483584f8fd5a1b862ae07973f4cc7bb3e46648\nasset: add annotations to v1p1beta1\n\nPiperOrigin-RevId: 292009868\n\ne19209fac29731d0baf6d9ac23da1164f7bdca24\nAdd the google.rpc.context.AttributeContext message to the open source\ndirectories.\n\nPiperOrigin-RevId: 291999930\n\nae5662960573f279502bf98a108a35ba1175e782\noslogin API: move file level option on top of the file to avoid protobuf.js bug.\n\nPiperOrigin-RevId: 291990506\n\neba3897fff7c49ed85d3c47fc96fe96e47f6f684\nAdd cc_proto_library and cc_grpc_library targets for Spanner and IAM protos.\n\nPiperOrigin-RevId: 291988651\n\n8e981acfd9b97ea2f312f11bbaa7b6c16e412dea\nBeta launch for PersonDetection and FaceDetection features.\n\nPiperOrigin-RevId: 291821782\n\n994e067fae3b21e195f7da932b08fff806d70b5d\nasset: add annotations to v1p2beta1\n\nPiperOrigin-RevId: 291815259\n\n244e1d2c89346ca2e0701b39e65552330d68545a\nAdd Playable Locations service\n\nPiperOrigin-RevId: 291806349\n\n909f8f67963daf45dd88d020877fb9029b76788d\nasset: add annotations to v1beta2\n\nPiperOrigin-RevId: 291805301\n\n3c39a1d6e23c1ef63c7fba4019c25e76c40dfe19\nKMS: add file-level message for CryptoKeyPath, it is defined in gapic yaml but not\nin proto files.\n\nPiperOrigin-RevId: 291420695\n\nc6f3f350b8387f8d1b85ed4506f30187ebaaddc3\ncontaineranalysis: update v1beta1 and bazel build with annotations\n\nPiperOrigin-RevId: 291401900\n\n92887d74b44e4e636252b7b8477d0d2570cd82db\nfix: fix the location of grpc config file.\n\nPiperOrigin-RevId: 291396015\n\ne26cab8afd19d396b929039dac5d874cf0b5336c\nexpr: add default_host and method_signature annotations to CelService\n\nPiperOrigin-RevId: 291240093\n\n06093ae3952441c34ec176d1f7431b8765cec0be\nirm: fix v1alpha2 bazel build by adding missing proto imports\n\nPiperOrigin-RevId: 291227940\n\na8a2514af326e4673063f9a3c9d0ef1091c87e6c\nAdd proto annotation for cloud/irm API\n\nPiperOrigin-RevId: 291217859\n\n8d16f76de065f530d395a4c7eabbf766d6a120fd\nGenerate Memcache v1beta2 API protos and gRPC ServiceConfig files\n\nPiperOrigin-RevId: 
291008516\n\n3af1dabd93df9a9f17bf3624d3b875c11235360b\ngrafeas: Add containeranalysis default_host to Grafeas service\n\nPiperOrigin-RevId: 290965849\n\nbe2663fa95e31cba67d0cd62611a6674db9f74b7\nfix(google/maps/roads): add missing opening bracket\n\nPiperOrigin-RevId: 290964086\n\nfacc26550a0af0696e0534bc9cae9df14275aa7c\nUpdating v2 protos with the latest inline documentation (in comments) and adding a per-service .yaml file.\n\nPiperOrigin-RevId: 290952261\n\ncda99c1f7dc5e4ca9b1caeae1dc330838cbc1461\nChange api_name to 'asset' for v1p1beta1\n\nPiperOrigin-RevId: 290800639\n\n94e9e90c303a820ce40643d9129e7f0d2054e8a1\nAdds Google Maps Road service\n\nPiperOrigin-RevId: 290795667\n\na3b23dcb2eaecce98c600c7d009451bdec52dbda\nrpc: new message ErrorInfo, other comment updates\n\nPiperOrigin-RevId: 290781668\n\n26420ef4e46c37f193c0fbe53d6ebac481de460e\nAdd proto definition for Org Policy v1.\n\nPiperOrigin-RevId: 290771923\n\n7f0dab8177cf371ae019a082e2512de7ac102888\nPublish Routes Preferred API v1 service definitions.\n\nPiperOrigin-RevId: 290326986\n\nad6e508d0728e1d1bca6e3f328cd562718cb772d\nFix: Qualify resource type references with \"jobs.googleapis.com/\"\n\nPiperOrigin-RevId: 290285762\n\n58e770d568a2b78168ddc19a874178fee8265a9d\ncts client library\n\nPiperOrigin-RevId: 290146169\n\naf9daa4c3b4c4a8b7133b81588dd9ffd37270af2\nAdd more programming language options to public proto\n\nPiperOrigin-RevId: 290144091\n\nd9f2bbf2df301ef84641d4cec7c828736a0bd907\ntalent: add missing resource.proto dep to Bazel build target\n\nPiperOrigin-RevId: 290143164\n\n3b3968237451d027b42471cd28884a5a1faed6c7\nAnnotate Talent API.\nAdd gRPC service config for retry.\nUpdate bazel file with google.api.resource dependency.\n\nPiperOrigin-RevId: 290125172\n\n0735b4b096872960568d1f366bfa75b7b0e1f1a3\nWeekly library update.\n\nPiperOrigin-RevId: 289939042\n\n8760d3d9a4543d7f9c0d1c7870aca08b116e4095\nWeekly library update.\n\nPiperOrigin-RevId: 289939020\n\n8607df842f782a901805187e02fff598145b0b0e\nChange Talent API timeout to 30s.\n\nPiperOrigin-RevId: 289912621\n\n908155991fe32570653bcb72ecfdcfc896642f41\nAdd Recommendations AI V1Beta1\n\nPiperOrigin-RevId: 289901914\n\n5c9a8c2bebd8b71aa66d1cc473edfaac837a2c78\nAdding no-arg method signatures for ListBillingAccounts and ListServices\n\nPiperOrigin-RevId: 289891136\n\n50b0e8286ac988b0593bd890eb31fef6ea2f5767\nlongrunning: add grpc service config and default_host annotation to operations.proto\n\nPiperOrigin-RevId: 289876944\n\n6cac27dabe51c54807b0401698c32d34998948a9\n Updating default deadline for Cloud Security Command Center's v1 APIs.\n\nPiperOrigin-RevId: 289875412\n\nd99df0d67057a233c711187e0689baa4f8e6333d\nFix: Correct spelling in C# namespace option\n\nPiperOrigin-RevId: 289709813\n\n2fa8d48165cc48e35b0c62e6f7bdade12229326c\nfeat: Publish Recommender v1 to GitHub.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 289619243\n\n9118db63d1ab493a2e44a3b4973fde810a835c49\nfirestore: don't retry reads that fail with Aborted\n\nFor transaction reads that fail with ABORTED, we need to rollback and start a new transaction. Our current configuration makes it so that GAPIC retries ABORTED reads multiple times without making any progress. 
Instead, we should retry at the transaction level.\n\nPiperOrigin-RevId: 289532382\n\n1dbfd3fe4330790b1e99c0bb20beb692f1e20b8a\nFix bazel build\nAdd other langauges (Java was already there) for bigquery/storage/v1alpha2 api.\n\nPiperOrigin-RevId: 289519766\n\nc06599cdd7d11f8d3fd25f8d3249e5bb1a3d5d73\nInitial commit of google.cloud.policytroubleshooter API, The API helps in troubleshooting GCP policies. Refer https://cloud.google.com/iam/docs/troubleshooting-access for more information\n\nPiperOrigin-RevId: 289491444\n\nfce7d80fa16ea241e87f7bc33d68595422e94ecd\nDo not pass samples option for Artman config of recommender v1 API.\n\nPiperOrigin-RevId: 289477403\n\nef179e8c61436297e6bb124352e47e45c8c80cb1\nfix: Address missing Bazel dependency.\n\nBazel builds stopped working in 06ec6d5 because\nthe google/longrunning/operations.proto file took\nan import from google/api/client.proto, but that\nimport was not added to BUILD.bazel.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 289446074\n\n8841655b242c84fd691d77d7bcf21b61044f01ff\nMigrate Data Labeling v1beta1 to GAPIC v2.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 289446026\n\n06ec6d5d053fff299eaa6eaa38afdd36c5e2fc68\nAdd annotations to google.longrunning.v1\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 289413169\n\n0480cf40be1d3cc231f4268a2fdb36a8dd60e641\nMigrate IAM Admin v1 to GAPIC v2.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 289411084\n\n1017173e9adeb858587639af61889ad970c728b1\nSpecify a C# namespace for BigQuery Connection v1beta1\n\nPiperOrigin-RevId: 289396763\n\nb08714b378e8e5b0c4ecdde73f92c36d6303b4b6\nfix: Integrate latest proto-docs-plugin fix.\nFixes dialogflow v2\n\nPiperOrigin-RevId: 289189004\n\n51217a67e79255ee1f2e70a6a3919df082513327\nCreate BUILD file for recommender v1\n\nPiperOrigin-RevId: 289183234\n\nacacd87263c0a60e458561b8b8ce9f67c760552a\nGenerate recommender v1 API protos and gRPC ServiceConfig files\n\nPiperOrigin-RevId: 289177510\n\n9d2f7133b97720b1fa3601f6dcd30760ba6d8a1e\nFix kokoro build script\n\nPiperOrigin-RevId: 289166315\n\nc43a67530d2a47a0220cad20ca8de39b3fbaf2c5\ncloudtasks: replace missing RPC timeout config for v2beta2 and v2beta3\n\nPiperOrigin-RevId: 289162391\n\n4cefc229a9197236fc0adf02d69b71c0c5cf59de\nSynchronize new proto/yaml changes.\n\nPiperOrigin-RevId: 289158456\n\n56f263fe959c50786dab42e3c61402d32d1417bd\nCatalog API: Adding config necessary to build client libraries\n\nPiperOrigin-RevId: 289149879\n\n4543762b23a57fc3c53d409efc3a9affd47b6ab3\nFix Bazel build\nbilling/v1 and dialogflow/v2 remain broken (not bazel-related issues).\nBilling has wrong configuration, dialogflow failure is caused by a bug in documentation plugin.\n\nPiperOrigin-RevId: 289140194\n\nc9dce519127b97e866ca133a01157f4ce27dcceb\nUpdate Bigtable docs\n\nPiperOrigin-RevId: 289114419\n\n802c5c5f2bf94c3facb011267d04e71942e0d09f\nMigrate DLP to proto annotations (but not GAPIC v2).\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 289102579\n\n6357f30f2ec3cff1d8239d18b707ff9d438ea5da\nRemove gRPC configuration file that was in the wrong place.\n\nPiperOrigin-RevId: 289096111\n\n360a8792ed62f944109d7e22d613a04a010665b4\n Protos for v1p1beta1 release of Cloud Security Command Center\n\nPiperOrigin-RevId: 289011995\n\na79211c20c4f2807eec524d00123bf7c06ad3d6e\nRoll back containeranalysis v1 to GAPIC v1.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 288999068\n\n9e60345ba603e03484a8aaa33ce5ffa19c1c652b\nPublish Routes Preferred API v1 proto definitions.\n\nPiperOrigin-RevId: 
288941399\n\nd52885b642ad2aa1f42b132ee62dbf49a73e1e24\nMigrate the service management API to GAPIC v2.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 288909426\n\n6ace586805c08896fef43e28a261337fcf3f022b\ncloudtasks: replace missing RPC timeout config\n\nPiperOrigin-RevId: 288783603\n\n51d906cabee4876b12497054b15b05d4a50ad027\nImport of Grafeas from Github.\n\nUpdate BUILD.bazel accordingly.\n\nPiperOrigin-RevId: 288783426\n\n5ef42bcd363ba0440f0ee65b3c80b499e9067ede\nMigrate Recommender v1beta1 to GAPIC v2.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 288713066\n\n94f986afd365b7d7e132315ddcd43d7af0e652fb\nMigrate Container Analysis v1 to GAPIC v2.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 288708382\n\n7a751a279184970d3b6ba90e4dd4d22a382a0747\nRemove Container Analysis v1alpha1 (nobody publishes it).\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 288707473\n\n3c0d9c71242e70474b2b640e15bb0a435fd06ff0\nRemove specious annotation from BigQuery Data Transfer before\nanyone accidentally does anything that uses it.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 288701604\n\n1af307a4764bd415ef942ac5187fa1def043006f\nMigrate BigQuery Connection to GAPIC v2.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 288698681\n\n08b488e0660c59842a7dee0e3e2b65d9e3a514a9\nExposing cloud_catalog.proto (This API is already available through REST)\n\nPiperOrigin-RevId: 288625007\n\na613482977e11ac09fa47687a5d1b5a01efcf794\nUpdate the OS Login v1beta API description to render better in the UI.\n\nPiperOrigin-RevId: 288547940\n\n5e182b8d9943f1b17008d69d4c7e865dc83641a7\nUpdate the OS Login API description to render better in the UI.\n\nPiperOrigin-RevId: 288546443\n\ncb79155f596e0396dd900da93872be7066f6340d\nFix: Add a resource annotation for Agent\nFix: Correct the service name in annotations for Intent and SessionEntityType\n\nPiperOrigin-RevId: 288441307\n\nf7f6e9daec3315fd47cb638789bd8415bf4a27cc\nAdded cloud asset api v1p1beta1\n\nPiperOrigin-RevId: 288427239\n\nf2880f5b342c6345f3dcaad24fcb3c6ca9483654\nBilling account API: Adding config necessary to build client libraries\n\nPiperOrigin-RevId: 288351810\n\ndc250ffe071729f8f8bef9d6fd0fbbeb0254c666\nFix: Remove incorrect resource annotations in requests\n\nPiperOrigin-RevId: 288321208\n\n91ef2d9dd69807b0b79555f22566fb2d81e49ff9\nAdd GAPIC annotations to Cloud KMS (but do not migrate the GAPIC config yet).\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 287999179\n\n4d45a6399e9444fbddaeb1c86aabfde210723714\nRefreshing Cloud Billing API protos.\n\nThis exposes the following API methods:\n- UpdateBillingAccount\n- CreateBillingAccount\n- GetIamPolicy\n- SetIamPolicy\n- TestIamPermissions\n\nThere are also some new fields to support the management of sub-accounts.\n\nPiperOrigin-RevId: 287908369\n\nec285d3d230810147ebbf8d5b691ee90320c6d2d\nHide not yet implemented update_transforms message\n\nPiperOrigin-RevId: 287608953\n\na202fb3b91cd0e4231be878b0348afd17067cbe2\nBigQuery Storage Write API v1alpha2 clients. 
The service is enabled by whitelist only.\n\nPiperOrigin-RevId: 287379998\n\n650d7f1f8adb0cfaf37b3ce2241c3168f24efd4d\nUpdate Readme.md to match latest Bazel updates\n090d98aea20270e3be4b64240775588f7ce50ff8\ndocs(bigtable): Fix library release level listed in generated documentation\n\nPiperOrigin-RevId: 287308849\n\n2c28f646ca77b1d57550368be22aa388adde2e66\nfirestore: retry reads that fail with contention\n\nPiperOrigin-RevId: 287250665\n\nfd3091fbe9b2083cabc53dc50c78035658bfc4eb\nSync timeout in grpc config back to 10s for tasks API with github googelapis gapic config.\n\nPiperOrigin-RevId: 287207067\n\n49dd7d856a6f77c0cf7e5cb3334423e5089a9e8a\nbazel: Integrate bazel-2.0.0 compatibility fixes\n\nPiperOrigin-RevId: 287205644\n\n46e52fd64973e815cae61e78b14608fe7aa7b1df\nbazel: Integrate bazel build file generator\n\nTo generate/update BUILD.bazel files for any particular client or a batch of clients:\n```\nbazel run //:build_gen -- --src=google/example/library\n```\n\nPiperOrigin-RevId: 286958627\n\n1a380ea21dea9b6ac6ad28c60ad96d9d73574e19\nBigQuery Storage Read API v1beta2 clients.\n\nPiperOrigin-RevId: 286616241\n\n5f3f1d0f1c06b6475a17d995e4f7a436ca67ec9e\nAdd Artman config for secretmanager.\n\nPiperOrigin-RevId: 286598440\n\n50af0530730348f1e3697bf3c70261f7daaf2981\nSynchronize new proto/yaml changes.\n\nPiperOrigin-RevId: 286491002\n\n91818800384f4ed26961aea268910b1a2ec58cc8\nFor Data Catalog API,\n1. Add support for marking a tag template field as required when creating a new tag template.\n2. Add support for updating a tag template field from required to optional.\n\nPiperOrigin-RevId: 286490262\n\nff4a2047b3d66f38c9b22197c370ed0d02fc0238\nWeekly library update.\n\nPiperOrigin-RevId: 286484215\n\n192c14029861752a911ed434fd6ee5b850517cd9\nWeekly library update.\n\nPiperOrigin-RevId: 286484165\n\nd9e328eaf790d4e4346fbbf32858160f497a03e0\nFix bazel build (versions 1.x)\n\nBump gapic-generator and resource names plugins to the latest version.\n\nPiperOrigin-RevId: 286469287\n\n0ca305403dcc50e31ad9477c9b6241ddfd2056af\nsecretmanager client package name option updates for java and go\n\nPiperOrigin-RevId: 286439553\n\nade4803e8a1a9e3efd249c8c86895d2f12eb2aaa\niam credentials: publish v1 protos containing annotations\n\nPiperOrigin-RevId: 286418383\n\n03e5708e5f8d1909dcb74b25520309e59ebf24be\nsecuritycenter: add missing proto deps for Bazel build\n\nPiperOrigin-RevId: 286417075\n\n8b991eb3eb82483b0ca1f1361a9c8e5b375c4747\nAdd secretmanager client package name options.\n\nPiperOrigin-RevId: 286415883\n\nd400cb8d45df5b2ae796b909f098a215b2275c1d\ndialogflow: add operation_info annotations to BatchUpdateEntities and BatchDeleteEntities.\n\nPiperOrigin-RevId: 286312673\n\nf2b25232db397ebd4f67eb901a2a4bc99f7cc4c6\nIncreased the default timeout time for all the Cloud Security Command Center client libraries.\n\nPiperOrigin-RevId: 286263771\n\ncb2f1eefd684c7efd56fd375cde8d4084a20439e\nExposing new Resource fields in the SecurityCenterProperties proto, added more comments to the filter logic for these Resource fields, and updated the response proto for the ListFindings API with the new Resource fields.\n\nPiperOrigin-RevId: 286263092\n\n73cebb20432b387c3d8879bb161b517d60cf2552\nUpdate v1beta2 clusters and jobs to include resource ids in GRPC header.\n\nPiperOrigin-RevId: 286261392\n\n1b4e453d51c0bd77e7b73896cdd8357d62768d83\nsecuritycenter: publish v1beta1 protos with annotations\n\nPiperOrigin-RevId: 286228860\n\na985eeda90ae98e8519d2320bee4dec148eb8ccb\nAdd default retry configurations for 
speech_v1p1beta1.\n\nSettings are copied from speech_gapic.legacy.yaml. The Python client library is being generated with timeouts that are too low. See https://github.com/GoogleCloudPlatform/python-docs-samples/issues/2578\n\nPiperOrigin-RevId: 286191318\n\n3352100a15ede383f5ab3c34599f7a10a3d066fe\nMake importing rule with the same name (but different aliases) from different repositories possible.\n\nThis is needed to allow monolitic gapic-generator and microgenerators coexist during transition period.\n\nTo plug a microgenerator:\n\n1) Add corresponding rules bidnings under `switched_rules_by_language` in repository_rules.bzl:\n rules[\"go_gapic_library2\"] = _switch(\n go and grpc and gapic,\n \"@gapic_generator_go//rules_go_gapic/go_gapic.bzl\",\n \"go_gapic_library\",\n )\n\n2) Import microgenerator in WORKSPACE (the above example assumes that the generator was imported under name \"gapic_generator_go\").\n\n3) To migrate an API from monolith to micro generator (this is done per API and per language) modify the corresponding load statement in the API's BUILD.bazel file. For example, for the example above, to migrate to go microgenerator modify the go-specific load statement in BUILD.bazel file of a specific API (which you want to migrate) to the following:\n\nload(\n \"@com_google_googleapis_imports//:imports.bzl\",\n \"go_gapic_assembly_pkg\",\n go_gapic_library = \"go_gapic_library2\",\n \"go_proto_library\",\n \"go_test\",\n)\n\nPiperOrigin-RevId: 286065440\n\n6ad2bb13bc4b0f3f785517f0563118f6ca52ddfd\nUpdated v1beta1 protos for the client:\n- added support for GenericSignedAttestation which has a generic Signature\n- added support for CVSSv3 and WindowsDetail in Vulnerability\n- documentation updates\n\nPiperOrigin-RevId: 286008145\n\nfe1962e49999a832eed8162c45f23096336a9ced\nAdMob API v1 20191210\n\nBasic account info, mediation and network report available. See https://developers.google.com/admob/api/release-notes for more details.\n\nPiperOrigin-RevId: 285894502\n\n41fc1403738b61427f3a798ca9750ef47eb9c0f2\nAnnotate the required fields for the Monitoring Dashboards API\n\nPiperOrigin-RevId: 285824386\n\n27d0e0f202cbe91bf155fcf36824a87a5764ef1e\nRemove inappropriate resource_reference annotations for UpdateWorkflowTemplateRequest.template.\n\nPiperOrigin-RevId: 285802643\n\ne5c4d3a2b5b5bef0a30df39ebb27711dc98dee64\nAdd Artman BUILD.bazel file for the Monitoring Dashboards API\n\nPiperOrigin-RevId: 285445602\n\n2085a0d3c76180ee843cf2ecef2b94ca5266be31\nFix path in the artman config for Monitoring Dashboard API.\n\nPiperOrigin-RevId: 285233245\n\n2da72dfe71e4cca80902f9e3e125c40f02c2925b\nAdd Artman and GAPIC configs for the Monitoring Dashboards API.\n\nPiperOrigin-RevId: 285211544\n\n9f6eeebf1f30f51ffa02acea5a71680fe592348e\nAdd annotations to Dataproc v1. 
(Also forwarding comment changes from internal source control.)\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 285197557\n\n19c4589a3cb44b3679f7b3fba88365b3d055d5f8\noslogin: fix v1beta retry configuration\n\nPiperOrigin-RevId: 285013366\n\nee3f02926d0f8a0bc13f8d716581aad20f575751\nAdd Monitoring Dashboards API protocol buffers to Google Cloud Monitoring API.\n\nPiperOrigin-RevId: 284982647\n\ne47fdd266542386e5e7346697f90476e96dc7ee8\nbigquery datatransfer: Remove non-publicly available DataSourceService.\n\nPiperOrigin-RevId: 284822593\n\n6156f433fd1d9d5e4a448d6c6da7f637921d92ea\nAdds OSConfig v1beta protos and initial client library config\n\nPiperOrigin-RevId: 284799663\n\n6cc9499e225a4f6a5e34fe07e390f67055d7991c\nAdd datetime.proto to google/type/BUILD.bazel\n\nPiperOrigin-RevId: 284643689\n\nfe7dd5277e39ffe0075729c61e8d118d7527946d\nCosmetic changes to proto comment as part of testing internal release instructions.\n\nPiperOrigin-RevId: 284608712\n\n68d109adad726b89f74276d2f4b2ba6aac6ec04a\nAdd annotations to securitycenter v1, but leave GAPIC v1 in place.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 284580511\n\ndf8a1707a910fc17c71407a75547992fd1864c51\nSynchronize new proto/yaml changes.\n\nPiperOrigin-RevId: 284568564\n\na69a974976221ce3bb944901b739418b85d6408c\nclient library update\n\nPiperOrigin-RevId: 284463979\n\na4adac3a12aca6e3a792c9c35ee850435fe7cf7e\nAdded DateTime, TimeZone, and Month proto files to google/type\n\nPiperOrigin-RevId: 284277770\n\ned5dec392906078db4f7745fe4f11d34dd401ae9\nchange common resources from message-level annotations to file-level annotations.\n\nPiperOrigin-RevId: 284236794\n\na00e2c575ef1b637667b4ebe96b8c228b2ddb273\nbigquerydatatransfer: change resource type TransferRun to Run to be consistent with gapic configs\nbigquerydatatransfer: add missing patterns for DataSource, TransferConfig and Run (to allow the location segment)\nbigquerydatatransfer: add file-level Parent resource type (to allow the location segement)\nbigquerydatatransfer: update grpc service config with correct retry delays\n\nPiperOrigin-RevId: 284234378\n\nb10e4547017ca529ac8d183e839f3c272e1c13de\ncloud asset: replace required fields for batchgetassethistory. Correct the time out duration.\n\nPiperOrigin-RevId: 284059574\n\n6690161e3dcc3367639a2ec10db67bf1cf392550\nAdd default retry configurations for speech_v1.\n\nSettings are copied from speech_gapic.legacy.yaml. The Python client library is being generated with timeouts that are too low. 
See https://github.com/GoogleCloudPlatform/python-docs-samples/issues/2578\n\nPiperOrigin-RevId: 284035915\n\n9b2635ef91e114f0357bdb87652c26a8f59316d5\ncloudtasks: fix gapic v2 config\n\nPiperOrigin-RevId: 284020555\n\ne5676ba8b863951a8ed0bfd6046e1db38062743c\nReinstate resource name handling in GAPIC config for Asset v1.\n\nPiperOrigin-RevId: 283993903\n\nf337f7fb702c85833b7b6ca56afaf9a1bf32c096\nOSConfig AgentEndpoint: add LookupEffectiveGuestPolicy rpc\n\nPiperOrigin-RevId: 283989762\n\nc0ac9b55f2e2efd0ee525b3a6591a1b09330e55a\nInclude real time feed api into v1 version\n\nPiperOrigin-RevId: 283845474\n\n2427a3a0f6f4222315362d973d91a082a3a884a7\nfirestore admin: update v1 protos with annotations & retry config\n\nPiperOrigin-RevId: 283826605\n\n555e844dbe04af50a8f55fe1217fa9d39a0a80b2\nchore: publish retry configs for iam admin, cloud asset, and remoteworkers\n\nPiperOrigin-RevId: 283801979\n\n6311dc536668849142d1fe5cd9fc46da66d1f77f\nfirestore: update v1beta1 protos with annotations and retry config\n\nPiperOrigin-RevId: 283794315\n\nda0edeeef953b05eb1524d514d2e9842ac2df0fd\nfeat: publish several retry config files for client generation\n\nPiperOrigin-RevId: 283614497\n\n59a78053537e06190f02d0a7ffb792c34e185c5a\nRemoving TODO comment\n\nPiperOrigin-RevId: 283592535\n\n8463992271d162e2aff1d5da5b78db11f2fb5632\nFix bazel build\n\nPiperOrigin-RevId: 283589351\n\n3bfcb3d8df10dfdba58f864d3bdb8ccd69364669\nPublic client library for bebop_jobs_api_20191118_1_RC3 release.\n\nPiperOrigin-RevId: 283568877\n\n27ab0db61021d267c452b34d149161a7bf0d9f57\nfirestore: publish annotated protos and new retry config\n\nPiperOrigin-RevId: 283565148\n\n38dc36a2a43cbab4a2a9183a43dd0441670098a9\nfeat: add http annotations for operations calls\n\nPiperOrigin-RevId: 283384331\n\n366caab94906975af0e17822e372f1d34e319d51\ndatastore: add a legacy artman config for PHP generation\n\nPiperOrigin-RevId: 283378578\n\n82944da21578a53b74e547774cf62ed31a05b841\nMigrate container v1beta1 to GAPIC v2.\n\nPiperOrigin-RevId: 283342796\n\n584dcde5826dd11ebe222016b7b208a4e1196f4b\nRemove resource name annotation for UpdateKeyRequest.key, because it's the resource, not a name.\n\nPiperOrigin-RevId: 283167368\n\n6ab0171e3688bfdcf3dbc4056e2df6345e843565\nAdded resource annotation for Key message.\n\nPiperOrigin-RevId: 283066965\n\n86c1a2db1707a25cec7d92f8850cc915163ec3c3\nExpose Admin API methods for Key manipulation.\n\nPiperOrigin-RevId: 282988776\n\n3ddad085965896ffb205d44cb0c0616fe3def10b\nC++ targets: correct deps so they build, rename them from trace* to cloudtrace*\nto match the proto names.\n\nPiperOrigin-RevId: 282857635\n\ne9389365a971ad6457ceb9646c595e79dfdbdea5\nSynchronize new proto/yaml changes.\n\nPiperOrigin-RevId: 282810797\n\ne42eaaa9abed3c4d63d64f790bd3191448dbbca6\nPut back C++ targets for cloud trace v2 api.\n\nPiperOrigin-RevId: 282803841\n\nd8896a3d8a191702a9e39f29cf4c2e16fa05f76d\nAdd initial BUILD.bazel for secretmanager.googleapis.com\n\nPiperOrigin-RevId: 282674885\n\n2cc56cb83ea3e59a6364e0392c29c9e23ad12c3a\nCreate sample for list recommendations\n\nPiperOrigin-RevId: 282665402\n\nf88e2ca65790e3b44bb3455e4779b41de1bf7136\nbump Go to ga\n\nPiperOrigin-RevId: 282651105\n\naac86d932b3cefd7d746f19def6935d16d6235e0\nDocumentation update. 
Add location_id in preparation for regionalization.\n\nPiperOrigin-RevId: 282586371\n\n5b501cd384f6b842486bd41acce77854876158e7\nMigrate Datastore Admin to GAPIC v2.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 282570874\n\n6a16d474d5be201b20a27646e2009c4dfde30452\nMigrate Datastore to GAPIC v2.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 282564329\n\n74bd9b95ac8c70b883814e4765a725cffe43d77c\nmark Go lib ga\n\nPiperOrigin-RevId: 282562558\n\nf7b3d434f44f6a77cf6c37cae5474048a0639298\nAdd secretmanager.googleapis.com protos\n\nPiperOrigin-RevId: 282546399\n\nc34a911aaa0660a45f5a556578f764f135e6e060\niot: bump Go GAPIC to GA release level\n\nPiperOrigin-RevId: 282494787\n\n79b7f1c5ba86859dbf70aa6cd546057c1002cdc0\nPut back C++ targets.\nPrevious change overrode custom C++ targets made by external teams. This PR puts those targets back.\n\nPiperOrigin-RevId: 282458292\n\n06a840781d2dc1b0a28e03e30fb4b1bfb0b29d1e\nPopulate BAZEL.build files for around 100 APIs (all APIs we publish) in all 7 langauges.\n\nPiperOrigin-RevId: 282449910\n\n777b580a046c4fa84a35e1d00658b71964120bb0\nCreate BUILD file for recommender v1beta1\n\nPiperOrigin-RevId: 282068850\n\n48b385b6ef71dfe2596490ea34c9a9a434e74243\nGenerate recommender v1beta1 gRPC ServiceConfig file\n\nPiperOrigin-RevId: 282067795\n\n8395b0f1435a4d7ce8737b3b55392627758bd20c\nfix: Set timeout to 25s, because Tasks fails for any deadline above 30s.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 282017295\n\n3ba7ddc4b2acf532bdfb0004ca26311053c11c30\nfix: Shift Ruby and PHP to legacy GAPIC YAMLs for back-compat.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 281852671\n\nad6f0c002194c3ec6c13d592d911d122d2293931\nRemove unneeded yaml files\n\nPiperOrigin-RevId: 281835839\n\n1f42588e4373750588152cdf6f747de1cadbcbef\nrefactor: Migrate Tasks beta 2 to GAPIC v2.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 281769558\n\n902b51f2073e9958a2aba441f7f7ac54ea00966d\nrefactor: Migrate Tasks to GAPIC v2 (for real this time).\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 281769522\n\n17561f59970eede87f61ef6e9c322fa1198a2f4d\nMigrate Tasks Beta 3 to GAPIC v2.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 281769519\n\nf95883b15a1ddd58eb7e3583fdefe7b00505faa3\nRegenerate recommender v1beta1 protos and sanitized yaml\n\nPiperOrigin-RevId: 281765245\n\n9a52df54c626b36699a058013d1735a166933167\nadd gRPC ServiceConfig for grafeas v1\n\nPiperOrigin-RevId: 281762754\n\n7a79d682ef40c5ca39c3fca1c0901a8e90021f8a\nfix: Roll back Tasks GAPIC v2 while we investigate C# issue.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 281758548\n\n3fc31491640a90f029f284289e7e97f78f442233\nMigrate Tasks to GAPIC v2.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 281751187\n\n5bc0fecee454f857cec042fb99fe2d22e1bff5bc\nfix: adds operation HTTP rules back to v1p1beta1 config\n\nPiperOrigin-RevId: 281635572\n\n5364a19284a1333b3ffe84e4e78a1919363d9f9c\nbazel: Fix build\n\n1) Update to latest gapic-generator (has iam resource names fix for java).\n2) Fix non-trivial issues with oslogin (resources defined in sibling package to the one they are used from) and monitoring.\n3) Fix trivial missing dependencies in proto_library targets for other apis.\n\nThis is to prepare the repository to being populated with BUILD.bazel files for all supported apis (101 API) in all 7 languages.\n\nPiperOrigin-RevId: 281618750\n\n0aa77cbe45538d5e5739eb637db3f2940b912789\nUpdating common proto files in google/type/ with their latest versions.\n\nPiperOrigin-RevId: 
281603926\n\nd47e1b4485b3effbb2298eb10dd13a544c0f66dc\nfix: replace Speech Recognize RPC retry_codes_name for non-standard assignment\n\nPiperOrigin-RevId: 281594037\n\n16543773103e2619d2b5f52456264de5bb9be104\nRegenerating public protos for datacatalog, also adding gRPC service config.\n\nPiperOrigin-RevId: 281423227\n\n328ebe76adb06128d12547ed70107fb841aebf4e\nChange custom data type from String to google.protobuf.Struct to be consistent with other docs such as\nhttps://developers.google.com/actions/smarthome/develop/process-intents#response_format\n\nPiperOrigin-RevId: 281402467\n\n5af83f47b9656261cafcf88b0b3334521ab266b3\n(internal change without visible public changes)\n\nPiperOrigin-RevId: 281334391\n\nc53ed56649583a149382bd88d3c427be475b91b6\nFix typo in protobuf docs.\n\nPiperOrigin-RevId: 281293109\n\nd8dd7fe8d5304f7bd1c52207703d7f27d5328c5a\nFix build by adding missing deps.\n\nPiperOrigin-RevId: 281088257\n\n3ef5ffd7351809d75c1332d2eaad1f24d9c318e4\nMigrate Error Reporting v1beta1 to proto annotations / GAPIC v2.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 281075722\n\n418ee8e24a56b5959e1c1defa4b6c97f883be379\nTrace v2: Add remaining proto annotations, migrate to GAPIC v2.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 281068859\n\nc89394342a9ef70acaf73a6959e04b943fbc817b\nThis change updates an outdated comment for the feature importance proto field since they are no longer in [0, 1] for online predictions.\n\nPiperOrigin-RevId: 280761373\n\n1ec8b8e2c3c8f41d7d2b22c594c025276d6a4ae6\nCode refactoring\n\nPiperOrigin-RevId: 280760149\n\n427a22b04039f93b769d89accd6f487413f667c1\nImport automl operation protos.\n\nPiperOrigin-RevId: 280703572\n\n45749a04dac104e986f6cc47da3baf7c8bb6f9b0\nfix: bigqueryconnection_gapic.yaml to reflect proto annotations\n\n* remove connection_credential resource\n* make CreateCredentialRequest.connection_id optional\n* shuffle field ordering in CreateCredential flattening\n\nPiperOrigin-RevId: 280685438\n\n8385366aa1e5d7796793db02a9c5e167d1fd8f17\nRevert the Trace v2 GAPIC for now.\nCommitter: @lukesneeringer\n\nPiperOrigin-RevId: 280669295\n\n5c8ab2c072d557c2f4c4e54b544394e2d62202d5\nMigrate Trace v1 and Trace v2 to GAPIC v2.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 280667429\n\nf6808ff4e8b966cd571e99279d4a2780ed97dff2\nRename the `endpoint_urls` field to `endpoint_uris` to be consistent with\nGoogle API nomenclature.\n\nPiperOrigin-RevId: 280581337\n\n1935fb8889686f5c9d107f11b3c6870fc3aa7cdc\nComment updates\n\nPiperOrigin-RevId: 280451656\n\n0797fd5b9029d630e68a0899734715d62ad38e33\nComment updates\n\nPiperOrigin-RevId: 280451600\n\n9bc8d07b8b749e791d16c8d559526928ceaf1994\nRollback of \"Migrate Cloud Error Reporting to proto annotations & GAPIC v2.\"\n\nPiperOrigin-RevId: 280445975\n\nf8720321aecf4aab42e03602ac2c67f9777d9170\nfix: bigtable retry config in GAPIC v2\n\nPiperOrigin-RevId: 280434856\n\nb11664ba64f92d96d748e0dd9724d006dcafd120\nMigrate Cloud Error Reporting to proto annotations & GAPIC v2.\n\nPiperOrigin-RevId: 280432937\n\n4f747bda9b099b4426f495985680d16d0227fa5f\n1. Change DataCatalog package name in java from com.google.cloud.datacatalog to com.google.cloud.datacatalog.v1beta1 (API version is included in the package). *This is a breaking change.*\n\n2. Add API for Taxonomies (PolicyTagManager and PolicyTagManagerSerialization services).\n\n3. 
Minor changes to documentation.\n\nPiperOrigin-RevId: 280394936\n\nbc76ffd87360ce1cd34e3a6eac28afd5e1efda76\nUse rules_proto bzl files to load proto_library\n\nThis makes googleapis forward compatible with Bazel incompatible change https://github.com/bazelbuild/bazel/issues/8922.\n\nThis CL was created by adding @rules_proto to the WORKSPACE file and then running:\n\nfind . -name BUILD.bazel | \\\n while read build; do \\\n buildifier --lint=fix --warnings=load $build; \\\n done\n\nSince buildifier cannot be told not to reformat the BUILD file, some files are reformatted.\n\nPiperOrigin-RevId: 280356106\n\n218164b3deba1075979c9dca5f71461379e42dd1\nMake the `permissions` argument in TestIamPermissions required.\n\nPiperOrigin-RevId: 280279014\n\ndec8fd8ea5dc464496606189ba4b8949188639c8\nUpdating Cloud Billing Budget API documentation for clarity.\n\nPiperOrigin-RevId: 280225437\n\na667ffab90deb5e2669eb40ec7b61ec96a3d0454\nIntroduced detailed status message for CreateTimeSeries: CreateTimeSeriesSummary replaces CreateTimeSeriesError, which is now deprecated and unused.\n\nPiperOrigin-RevId: 280221707\n\nbe0a25eceec8916633447a37af0ecea801b85186\nMigrate Bigtable API to GAPIC v2 config.\n\nPiperOrigin-RevId: 280199643\n\n88bbf96b90089994ed16208a0f38cdd07f743742\nFix location of monitoring.yaml in Artman config for monitoring v3.\n\nPiperOrigin-RevId: 280134477\n\ndbaa01a20303758eed0c5a95ad2239ea306ad9a5\nUpdate namespace for PHP.\n\nPiperOrigin-RevId: 280085199\n\nf73b3796a635b2026a590d5133af7fa1f0eb807b\nStandardize pub/sub client default settings across clients:\n- Add retry codes for streaming pull\n- Decrease publish's max_rpc_timeout (mini-timeout) from 10 mins to 1 min\n- Decrease publish's total timeout from 10 mins to 1 min\n- Increase publish batching threshold from 10 to 100 elements\n- Increase publish batching size threshold from 1 KiB to 1 MiB\n\nPiperOrigin-RevId: 280044012\n\n822172613e1d93bede3beaf78b123c42a5876e2b\nReplace local_repository with http_archive in WORKSPACE\n\nPiperOrigin-RevId: 280039052\n\n6a8c7914d1b79bd832b5157a09a9332e8cbd16d4\nAdded notification_supported_by_agent to indicate whether the agent is sending notifications to Google or not.\n\nPiperOrigin-RevId: 279991530\n\n675de3dc9ab98cc1cf54216ad58c933ede54e915\nAdd an endpoint_urls field to the instance admin proto and adds a field_mask field to the GetInstanceRequest.\n\nPiperOrigin-RevId: 279982263\n\nf69562be0608904932bdcfbc5ad8b9a22d9dceb8\nAdds some clarification to IAM Policy public proto comments about the policy versioning compliance check for etag-less SetIamPolicy requests.\n\nPiperOrigin-RevId: 279774957\n\n4e86b2538758e3155e867d1cb4155ee91de7c6e9\nDocumentation update. 
Add the new action for sending metrics to Stackdriver.\n\nPiperOrigin-RevId: 279768476\n\neafaf30b7a3af0bc72f323fe6a6827327d3cad75\nfix: Restore deleted field to avoid a breaking change.\n\nPiperOrigin-RevId: 279760458\n\ned13a73f3054a29b764f104feaa503820b75140a\nAdd GAPIC annotations to the GKE API.\n\nPiperOrigin-RevId: 279734275\n\n6b125955bf0d6377b96f205e5d187e9d524b7ea2\nUpdate timeouts to 1 hour for default and streaming RPCs.\n\nPiperOrigin-RevId: 279657866\n\n989b304c8a6cfe72bdd7cb264e0d71b784db9421\nAdd Service Monitoring (Service and ServiceLevelObjective) protocol buffers to Google Cloud Monitoring API.\n\nPiperOrigin-RevId: 279649144\n\n1ef3bed9594674bb571ce20418af307505e3f609\nUpdating configs for AgentEndpoint to fix the client library generation.\n\nPiperOrigin-RevId: 279518887\n\n34e661f58d58fa57da8ed113a3d8bb3de26b307d\nUpdate v1beta2 clusters and jobs to include resource ids in GRPC header.\n\nPiperOrigin-RevId: 279417429\n\n248abde06efb7e5a3d81b84de02c8272122b0c3b\nIntegrate GAPIC Python Bazel Extensions\n\nAlso configure python build for the following clients as an example:\n\ndiaglogflow/v2\nlanguage/v1\ntexttospeech/v1\nfirestore/v1beta1\npubsub/v1\n\nPiperOrigin-RevId: 279406526\n\n7ffbf721e29b8806e0c8947c5dd0cdddc02de72a\nOSConfig Agentendpoint: Rename ReportTaskStart to StartNextTask\n\nPiperOrigin-RevId: 279389774\n\n2642d8688bab8981c8a5153b7578f9ff8460a37c\nAgentendpoint API: minor doc updates, addition of exclusive_packages|patches to PatchConfigs.\n\nPiperOrigin-RevId: 279326626\n\nd323b287c782802242005072d15f1474d7d10819\nDocumentation changes.\n\nPiperOrigin-RevId: 279234903\n\n29927f71d92d59551a42272ab7c6e97e8413af78\nPublishing Billing Budgets v1alpha1 API.\n\nPiperOrigin-RevId: 279176561\n\nff413d36f8358818d76fa92006f2d8f608843093\nAdding gRPC service config for Billing Budgets API.\n\nPiperOrigin-RevId: 279175129\n\n3eb91187709cc96bb890c110f518505f65ffd95d\nagentendpoint: removes all gapic languages except Go from artman config\n\nPiperOrigin-RevId: 279173968\n\na34950f968c7944a1036551b545557edcc18c767\nFix bazel build.\n\nUpdate gapic-generator and protoc-java-resource-name plugin dependencies to the latest versions.\n\nThe following clients remain broken because of bugs in gapic-generator and/or corresponding configs\n\ngoogle/cloud/iot/v1\ngoogle/cloud/oslogin/v1\ngoogle/spanner/admin/instance/v1\ngoogle/cloud/oslogin/v1\n\nPiperOrigin-RevId: 279171061\n\n0ed34e9fdf601dfc37eb24c40e17495b86771ff4\nAdds agentendpoint protos and initial client library config\n\nPiperOrigin-RevId: 279147036\n\ncad1d3b365a90c2a9f014b84a2a1acb55c15480f\nUpdates to MediaCard\n\nPiperOrigin-RevId: 279100776\n\n05556c26b633c153f2eca62aeafbcd62705f41b7\nUpdates to MediaCard\n\nPiperOrigin-RevId: 279100278\n\n2275670a746ab2bc03ebba0d914b45320ea15af4\nSynchronize new proto/yaml changes.\n\nPiperOrigin-RevId: 278922329\n\n" + "sha": "a8ed9d921fdddc61d8467bfd7c1668f0ad90435c", + "internalRef": "293257997", + "log": "a8ed9d921fdddc61d8467bfd7c1668f0ad90435c\nfix: set Ruby module name for OrgPolicy\n\nPiperOrigin-RevId: 293257997\n\n6c7d28509bd8315de8af0889688ee20099594269\nredis: v1beta1 add UpgradeInstance and connect_mode field to Instance\n\nPiperOrigin-RevId: 293242878\n\nae0abed4fcb4c21f5cb67a82349a049524c4ef68\nredis: v1 add connect_mode field to Instance\n\nPiperOrigin-RevId: 293241914\n\n3f7a0d29b28ee9365771da2b66edf7fa2b4e9c56\nAdds service config definition for bigqueryreservation v1beta1\n\nPiperOrigin-RevId: 293234418\n\n0c88168d5ed6fe353a8cf8cbdc6bf084f6bb66a5\naddition 
of BUILD & configuration for accessapproval v1\n\nPiperOrigin-RevId: 293219198\n\n39bedc2e30f4778ce81193f6ba1fec56107bcfc4\naccessapproval: v1 publish protos\n\nPiperOrigin-RevId: 293167048\n\n69d9945330a5721cd679f17331a78850e2618226\nAdd file-level `Session` resource definition\n\nPiperOrigin-RevId: 293080182\n\nf6a1a6b417f39694275ca286110bc3c1ca4db0dc\nAdd file-level `Session` resource definition\n\nPiperOrigin-RevId: 293080178\n\n" } }, { diff --git a/tests/unit/gapic/v1/test_video_intelligence_service_client_v1.py b/tests/unit/gapic/v1/test_video_intelligence_service_client_v1.py index fca6c1e1..7c829cef 100644 --- a/tests/unit/gapic/v1/test_video_intelligence_service_client_v1.py +++ b/tests/unit/gapic/v1/test_video_intelligence_service_client_v1.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -83,9 +83,9 @@ def test_annotate_video(self): client = videointelligence_v1.VideoIntelligenceServiceClient() # Setup Request - input_uri = "gs://cloud-samples-data/video/cat.mp4" features_element = enums.Feature.LABEL_DETECTION features = [features_element] + input_uri = "gs://cloud-samples-data/video/cat.mp4" response = client.annotate_video(input_uri=input_uri, features=features) result = response.result() @@ -93,7 +93,7 @@ def test_annotate_video(self): assert len(channel.requests) == 1 expected_request = video_intelligence_pb2.AnnotateVideoRequest( - input_uri=input_uri, features=features + features=features, input_uri=input_uri ) actual_request = channel.requests[0][1] assert expected_request == actual_request @@ -114,9 +114,9 @@ def test_annotate_video_exception(self): client = videointelligence_v1.VideoIntelligenceServiceClient() # Setup Request - input_uri = "gs://cloud-samples-data/video/cat.mp4" features_element = enums.Feature.LABEL_DETECTION features = [features_element] + input_uri = "gs://cloud-samples-data/video/cat.mp4" response = client.annotate_video(input_uri=input_uri, features=features) exception = response.exception() diff --git a/tests/unit/gapic/v1beta2/test_video_intelligence_service_client_v1beta2.py b/tests/unit/gapic/v1beta2/test_video_intelligence_service_client_v1beta2.py index df1c2d2a..b457c478 100644 --- a/tests/unit/gapic/v1beta2/test_video_intelligence_service_client_v1beta2.py +++ b/tests/unit/gapic/v1beta2/test_video_intelligence_service_client_v1beta2.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/gapic/v1p1beta1/test_video_intelligence_service_client_v1p1beta1.py b/tests/unit/gapic/v1p1beta1/test_video_intelligence_service_client_v1p1beta1.py index 5d7ee107..6dddff91 100644 --- a/tests/unit/gapic/v1p1beta1/test_video_intelligence_service_client_v1p1beta1.py +++ b/tests/unit/gapic/v1p1beta1/test_video_intelligence_service_client_v1p1beta1.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
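Editor's note: the reordered request setup in the v1 test hunk above mirrors the regenerated GAPIC surface, which now lists `features` ahead of `input_uri` when building `AnnotateVideoRequest`. A minimal sketch of the equivalent call outside the test harness (assuming application default credentials are configured; the timeout value is illustrative) looks like this:

```python
from google.cloud import videointelligence_v1
from google.cloud.videointelligence_v1 import enums

client = videointelligence_v1.VideoIntelligenceServiceClient()

# Same feature and sample URI used in the updated unit test.
features = [enums.Feature.LABEL_DETECTION]
input_uri = "gs://cloud-samples-data/video/cat.mp4"

# annotate_video returns a long-running operation; result() blocks until it completes.
operation = client.annotate_video(input_uri=input_uri, features=features)
result = operation.result(timeout=300)
print(result.annotation_results[0])
```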
diff --git a/tests/unit/gapic/v1p2beta1/test_video_intelligence_service_client_v1p2beta1.py b/tests/unit/gapic/v1p2beta1/test_video_intelligence_service_client_v1p2beta1.py index 5bc061fb..01f0ad5c 100644 --- a/tests/unit/gapic/v1p2beta1/test_video_intelligence_service_client_v1p2beta1.py +++ b/tests/unit/gapic/v1p2beta1/test_video_intelligence_service_client_v1p2beta1.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/gapic/v1p3beta1/test_streaming_video_intelligence_service_client_v1p3beta1.py b/tests/unit/gapic/v1p3beta1/test_streaming_video_intelligence_service_client_v1p3beta1.py index 0bc5741c..f6f6e7ac 100644 --- a/tests/unit/gapic/v1p3beta1/test_streaming_video_intelligence_service_client_v1p3beta1.py +++ b/tests/unit/gapic/v1p3beta1/test_streaming_video_intelligence_service_client_v1p3beta1.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/gapic/v1p3beta1/test_video_intelligence_service_client_v1p3beta1.py b/tests/unit/gapic/v1p3beta1/test_video_intelligence_service_client_v1p3beta1.py index 5fac526d..e6f9d431 100644 --- a/tests/unit/gapic/v1p3beta1/test_video_intelligence_service_client_v1p3beta1.py +++ b/tests/unit/gapic/v1p3beta1/test_video_intelligence_service_client_v1p3beta1.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License.
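Editor's note: the v1p3beta1 test hunks above only refresh license headers; the person detection and face detection surfaces named in this change are not exercised here. As a hypothetical usage sketch, the `FACE_DETECTION` and `PERSON_DETECTION` feature names below follow the PR title rather than the hunks shown, so treat them as assumptions about the regenerated v1p3beta1 enums:

```python
from google.cloud import videointelligence_v1p3beta1
from google.cloud.videointelligence_v1p3beta1 import enums

client = videointelligence_v1p3beta1.VideoIntelligenceServiceClient()

# Assumed feature names for the surfaces added by this change.
features = [
    enums.Feature.FACE_DETECTION,
    enums.Feature.PERSON_DETECTION,
]

operation = client.annotate_video(
    input_uri="gs://cloud-samples-data/video/cat.mp4",
    features=features,
)
result = operation.result(timeout=600)

# Each entry in annotation_results carries the per-feature annotations for one segment.
print(result.annotation_results[0])
```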