diff --git a/google/cloud/videointelligence_v1/services/video_intelligence_service/async_client.py b/google/cloud/videointelligence_v1/services/video_intelligence_service/async_client.py index 1e72bd8f..95c8c0b5 100644 --- a/google/cloud/videointelligence_v1/services/video_intelligence_service/async_client.py +++ b/google/cloud/videointelligence_v1/services/video_intelligence_service/async_client.py @@ -276,6 +276,12 @@ async def annotate_video( # Done; return the response. return response + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/videointelligence_v1/services/video_intelligence_service/client.py b/google/cloud/videointelligence_v1/services/video_intelligence_service/client.py index 6261b9a5..71d92661 100644 --- a/google/cloud/videointelligence_v1/services/video_intelligence_service/client.py +++ b/google/cloud/videointelligence_v1/services/video_intelligence_service/client.py @@ -331,10 +331,7 @@ def __init__( client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, - always_use_jwt_access=( - Transport == type(self).get_transport_class("grpc") - or Transport == type(self).get_transport_class("grpc_asyncio") - ), + always_use_jwt_access=True, ) def annotate_video( @@ -438,6 +435,19 @@ def annotate_video( # Done; return the response. return response + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/videointelligence_v1/services/video_intelligence_service/transports/base.py b/google/cloud/videointelligence_v1/services/video_intelligence_service/transports/base.py index 0138e4a0..9e1d41db 100644 --- a/google/cloud/videointelligence_v1/services/video_intelligence_service/transports/base.py +++ b/google/cloud/videointelligence_v1/services/video_intelligence_service/transports/base.py @@ -173,6 +173,15 @@ def _prep_wrapped_messages(self, client_info): ), } + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + @property def operations_client(self) -> operations_v1.OperationsClient: """Return the client designed to process long-running operations.""" diff --git a/google/cloud/videointelligence_v1/services/video_intelligence_service/transports/grpc.py b/google/cloud/videointelligence_v1/services/video_intelligence_service/transports/grpc.py index ab9f9c5c..aad743ff 100644 --- a/google/cloud/videointelligence_v1/services/video_intelligence_service/transports/grpc.py +++ b/google/cloud/videointelligence_v1/services/video_intelligence_service/transports/grpc.py @@ -272,5 +272,8 @@ def annotate_video( ) return self._stubs["annotate_video"] + def close(self): + self.grpc_channel.close() + __all__ = ("VideoIntelligenceServiceGrpcTransport",) diff --git a/google/cloud/videointelligence_v1/services/video_intelligence_service/transports/grpc_asyncio.py b/google/cloud/videointelligence_v1/services/video_intelligence_service/transports/grpc_asyncio.py index 18771166..45e866a1 100644 --- a/google/cloud/videointelligence_v1/services/video_intelligence_service/transports/grpc_asyncio.py +++ b/google/cloud/videointelligence_v1/services/video_intelligence_service/transports/grpc_asyncio.py @@ -279,5 +279,8 @@ def annotate_video( ) return self._stubs["annotate_video"] + def close(self): + return self.grpc_channel.close() + __all__ = ("VideoIntelligenceServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/videointelligence_v1/types/video_intelligence.py b/google/cloud/videointelligence_v1/types/video_intelligence.py index 078f5f0c..d9070658 100644 --- a/google/cloud/videointelligence_v1/types/video_intelligence.py +++ b/google/cloud/videointelligence_v1/types/video_intelligence.py @@ -107,6 +107,7 @@ class Likelihood(proto.Enum): class AnnotateVideoRequest(proto.Message): r"""Video annotation request. + Attributes: input_uri (str): Input video location. Currently, only `Cloud @@ -157,6 +158,7 @@ class AnnotateVideoRequest(proto.Message): class VideoContext(proto.Message): r"""Video context and/or feature-specific parameters. + Attributes: segments (Sequence[google.cloud.videointelligence_v1.types.VideoSegment]): Video segments to annotate. The segments may @@ -210,6 +212,7 @@ class VideoContext(proto.Message): class LabelDetectionConfig(proto.Message): r"""Config for LABEL_DETECTION. + Attributes: label_detection_mode (google.cloud.videointelligence_v1.types.LabelDetectionMode): What labels should be detected with LABEL_DETECTION, in @@ -251,6 +254,7 @@ class LabelDetectionConfig(proto.Message): class ShotChangeDetectionConfig(proto.Message): r"""Config for SHOT_CHANGE_DETECTION. + Attributes: model (str): Model to use for shot change detection. @@ -263,6 +267,7 @@ class ShotChangeDetectionConfig(proto.Message): class ObjectTrackingConfig(proto.Message): r"""Config for OBJECT_TRACKING. + Attributes: model (str): Model to use for object tracking. @@ -275,6 +280,7 @@ class ObjectTrackingConfig(proto.Message): class FaceDetectionConfig(proto.Message): r"""Config for FACE_DETECTION. + Attributes: model (str): Model to use for face detection. @@ -296,6 +302,7 @@ class FaceDetectionConfig(proto.Message): class PersonDetectionConfig(proto.Message): r"""Config for PERSON_DETECTION. + Attributes: include_bounding_boxes (bool): Whether bounding boxes are included in the @@ -317,6 +324,7 @@ class PersonDetectionConfig(proto.Message): class ExplicitContentDetectionConfig(proto.Message): r"""Config for EXPLICIT_CONTENT_DETECTION. 
+ Attributes: model (str): Model to use for explicit content detection. @@ -329,6 +337,7 @@ class ExplicitContentDetectionConfig(proto.Message): class TextDetectionConfig(proto.Message): r"""Config for TEXT_DETECTION. + Attributes: language_hints (Sequence[str]): Language hint can be specified if the @@ -351,6 +360,7 @@ class TextDetectionConfig(proto.Message): class VideoSegment(proto.Message): r"""Video segment. + Attributes: start_time_offset (google.protobuf.duration_pb2.Duration): Time-offset, relative to the beginning of the @@ -372,6 +382,7 @@ class VideoSegment(proto.Message): class LabelSegment(proto.Message): r"""Video segment level annotation results for label detection. + Attributes: segment (google.cloud.videointelligence_v1.types.VideoSegment): Video segment where a label was detected. @@ -385,6 +396,7 @@ class LabelSegment(proto.Message): class LabelFrame(proto.Message): r"""Video frame level annotation results for label detection. + Attributes: time_offset (google.protobuf.duration_pb2.Duration): Time-offset, relative to the beginning of the @@ -400,6 +412,7 @@ class LabelFrame(proto.Message): class Entity(proto.Message): r"""Detected entity from video analysis. + Attributes: entity_id (str): Opaque entity ID. Some IDs may be available in `Google @@ -418,6 +431,7 @@ class Entity(proto.Message): class LabelAnnotation(proto.Message): r"""Label annotation. + Attributes: entity (google.cloud.videointelligence_v1.types.Entity): Detected entity. @@ -444,6 +458,7 @@ class LabelAnnotation(proto.Message): class ExplicitContentFrame(proto.Message): r"""Video frame level annotation results for explicit content. + Attributes: time_offset (google.protobuf.duration_pb2.Duration): Time-offset, relative to the beginning of the @@ -499,6 +514,7 @@ class NormalizedBoundingBox(proto.Message): class FaceDetectionAnnotation(proto.Message): r"""Face detection annotation. + Attributes: tracks (Sequence[google.cloud.videointelligence_v1.types.Track]): The face tracks with attributes. @@ -515,6 +531,7 @@ class FaceDetectionAnnotation(proto.Message): class PersonDetectionAnnotation(proto.Message): r"""Person detection annotation per video. + Attributes: tracks (Sequence[google.cloud.videointelligence_v1.types.Track]): The detected tracks of a person. @@ -528,6 +545,7 @@ class PersonDetectionAnnotation(proto.Message): class FaceSegment(proto.Message): r"""Video segment level annotation results for face detection. + Attributes: segment (google.cloud.videointelligence_v1.types.VideoSegment): Video segment where a face was detected. @@ -538,6 +556,7 @@ class FaceSegment(proto.Message): class FaceFrame(proto.Message): r"""Deprecated. No effect. + Attributes: normalized_bounding_boxes (Sequence[google.cloud.videointelligence_v1.types.NormalizedBoundingBox]): Normalized Bounding boxes in a frame. @@ -558,6 +577,7 @@ class FaceFrame(proto.Message): class FaceAnnotation(proto.Message): r"""Deprecated. No effect. + Attributes: thumbnail (bytes): Thumbnail of a representative face view (in @@ -606,6 +626,7 @@ class TimestampedObject(proto.Message): class Track(proto.Message): r"""A track of an object instance. + Attributes: segment (google.cloud.videointelligence_v1.types.VideoSegment): Video segment of a track. @@ -675,6 +696,7 @@ class DetectedLandmark(proto.Message): class VideoAnnotationResults(proto.Message): r"""Annotation results for a single video. 
+ Attributes: input_uri (str): Video file location in `Cloud @@ -806,6 +828,7 @@ class AnnotateVideoResponse(proto.Message): class VideoAnnotationProgress(proto.Message): r"""Annotation progress for a single video. + Attributes: input_uri (str): Video file location in `Cloud @@ -851,6 +874,7 @@ class AnnotateVideoProgress(proto.Message): class SpeechTranscriptionConfig(proto.Message): r"""Config for SPEECH_TRANSCRIPTION. + Attributes: language_code (str): Required. *Required* The language of the supplied audio as a @@ -969,6 +993,7 @@ class SpeechTranscription(proto.Message): class SpeechRecognitionAlternative(proto.Message): r"""Alternative hypotheses (a.k.a. n-best list). + Attributes: transcript (str): Transcript text representing the words that @@ -1077,6 +1102,7 @@ class NormalizedBoundingPoly(proto.Message): class TextSegment(proto.Message): r"""Video segment level annotation results for text detection. + Attributes: segment (google.cloud.videointelligence_v1.types.VideoSegment): Video segment where a text snippet was @@ -1155,6 +1181,7 @@ class ObjectTrackingFrame(proto.Message): class ObjectTrackingAnnotation(proto.Message): r"""Annotations corresponding to one tracked object. + Attributes: segment (google.cloud.videointelligence_v1.types.VideoSegment): Non-streaming batch mode ONLY. diff --git a/google/cloud/videointelligence_v1beta2/services/video_intelligence_service/async_client.py b/google/cloud/videointelligence_v1beta2/services/video_intelligence_service/async_client.py index dafe7b7c..7208a72d 100644 --- a/google/cloud/videointelligence_v1beta2/services/video_intelligence_service/async_client.py +++ b/google/cloud/videointelligence_v1beta2/services/video_intelligence_service/async_client.py @@ -276,6 +276,12 @@ async def annotate_video( # Done; return the response. return response + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/videointelligence_v1beta2/services/video_intelligence_service/client.py b/google/cloud/videointelligence_v1beta2/services/video_intelligence_service/client.py index c4cfc802..49053746 100644 --- a/google/cloud/videointelligence_v1beta2/services/video_intelligence_service/client.py +++ b/google/cloud/videointelligence_v1beta2/services/video_intelligence_service/client.py @@ -331,10 +331,7 @@ def __init__( client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, - always_use_jwt_access=( - Transport == type(self).get_transport_class("grpc") - or Transport == type(self).get_transport_class("grpc_asyncio") - ), + always_use_jwt_access=True, ) def annotate_video( @@ -438,6 +435,19 @@ def annotate_video( # Done; return the response. return response + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/videointelligence_v1beta2/services/video_intelligence_service/transports/base.py b/google/cloud/videointelligence_v1beta2/services/video_intelligence_service/transports/base.py index a49bd4fe..858cf016 100644 --- a/google/cloud/videointelligence_v1beta2/services/video_intelligence_service/transports/base.py +++ b/google/cloud/videointelligence_v1beta2/services/video_intelligence_service/transports/base.py @@ -173,6 +173,15 @@ def _prep_wrapped_messages(self, client_info): ), } + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + @property def operations_client(self) -> operations_v1.OperationsClient: """Return the client designed to process long-running operations.""" diff --git a/google/cloud/videointelligence_v1beta2/services/video_intelligence_service/transports/grpc.py b/google/cloud/videointelligence_v1beta2/services/video_intelligence_service/transports/grpc.py index 9ecf5e82..77b203d6 100644 --- a/google/cloud/videointelligence_v1beta2/services/video_intelligence_service/transports/grpc.py +++ b/google/cloud/videointelligence_v1beta2/services/video_intelligence_service/transports/grpc.py @@ -272,5 +272,8 @@ def annotate_video( ) return self._stubs["annotate_video"] + def close(self): + self.grpc_channel.close() + __all__ = ("VideoIntelligenceServiceGrpcTransport",) diff --git a/google/cloud/videointelligence_v1beta2/services/video_intelligence_service/transports/grpc_asyncio.py b/google/cloud/videointelligence_v1beta2/services/video_intelligence_service/transports/grpc_asyncio.py index ddef810d..ca2b00d2 100644 --- a/google/cloud/videointelligence_v1beta2/services/video_intelligence_service/transports/grpc_asyncio.py +++ b/google/cloud/videointelligence_v1beta2/services/video_intelligence_service/transports/grpc_asyncio.py @@ -279,5 +279,8 @@ def annotate_video( ) return self._stubs["annotate_video"] + def close(self): + return self.grpc_channel.close() + __all__ = ("VideoIntelligenceServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/videointelligence_v1beta2/types/video_intelligence.py b/google/cloud/videointelligence_v1beta2/types/video_intelligence.py index fb2d38e8..8b0ee23b 100644 --- a/google/cloud/videointelligence_v1beta2/types/video_intelligence.py +++ b/google/cloud/videointelligence_v1beta2/types/video_intelligence.py @@ -80,6 +80,7 @@ class Likelihood(proto.Enum): class AnnotateVideoRequest(proto.Message): r"""Video annotation request. + Attributes: input_uri (str): Input video location. Currently, only `Google Cloud @@ -131,6 +132,7 @@ class AnnotateVideoRequest(proto.Message): class VideoContext(proto.Message): r"""Video context and/or feature-specific parameters. + Attributes: segments (Sequence[google.cloud.videointelligence_v1beta2.types.VideoSegment]): Video segments to annotate. The segments may @@ -164,6 +166,7 @@ class VideoContext(proto.Message): class LabelDetectionConfig(proto.Message): r"""Config for LABEL_DETECTION. + Attributes: label_detection_mode (google.cloud.videointelligence_v1beta2.types.LabelDetectionMode): What labels should be detected with LABEL_DETECTION, in @@ -187,6 +190,7 @@ class LabelDetectionConfig(proto.Message): class ShotChangeDetectionConfig(proto.Message): r"""Config for SHOT_CHANGE_DETECTION. 
+ Attributes: model (str): Model to use for shot change detection. @@ -199,6 +203,7 @@ class ShotChangeDetectionConfig(proto.Message): class ExplicitContentDetectionConfig(proto.Message): r"""Config for EXPLICIT_CONTENT_DETECTION. + Attributes: model (str): Model to use for explicit content detection. @@ -211,6 +216,7 @@ class ExplicitContentDetectionConfig(proto.Message): class FaceDetectionConfig(proto.Message): r"""Config for FACE_DETECTION. + Attributes: model (str): Model to use for face detection. @@ -227,6 +233,7 @@ class FaceDetectionConfig(proto.Message): class VideoSegment(proto.Message): r"""Video segment. + Attributes: start_time_offset (google.protobuf.duration_pb2.Duration): Time-offset, relative to the beginning of the @@ -248,6 +255,7 @@ class VideoSegment(proto.Message): class LabelSegment(proto.Message): r"""Video segment level annotation results for label detection. + Attributes: segment (google.cloud.videointelligence_v1beta2.types.VideoSegment): Video segment where a label was detected. @@ -261,6 +269,7 @@ class LabelSegment(proto.Message): class LabelFrame(proto.Message): r"""Video frame level annotation results for label detection. + Attributes: time_offset (google.protobuf.duration_pb2.Duration): Time-offset, relative to the beginning of the @@ -276,6 +285,7 @@ class LabelFrame(proto.Message): class Entity(proto.Message): r"""Detected entity from video analysis. + Attributes: entity_id (str): Opaque entity ID. Some IDs may be available in `Google @@ -294,6 +304,7 @@ class Entity(proto.Message): class LabelAnnotation(proto.Message): r"""Label annotation. + Attributes: entity (google.cloud.videointelligence_v1beta2.types.Entity): Detected entity. @@ -317,6 +328,7 @@ class LabelAnnotation(proto.Message): class ExplicitContentFrame(proto.Message): r"""Video frame level annotation results for explicit content. + Attributes: time_offset (google.protobuf.duration_pb2.Duration): Time-offset, relative to the beginning of the @@ -369,6 +381,7 @@ class NormalizedBoundingBox(proto.Message): class FaceSegment(proto.Message): r"""Video segment level annotation results for face detection. + Attributes: segment (google.cloud.videointelligence_v1beta2.types.VideoSegment): Video segment where a face was detected. @@ -379,6 +392,7 @@ class FaceSegment(proto.Message): class FaceFrame(proto.Message): r"""Video frame level annotation results for face detection. + Attributes: normalized_bounding_boxes (Sequence[google.cloud.videointelligence_v1beta2.types.NormalizedBoundingBox]): Normalized Bounding boxes in a frame. @@ -399,6 +413,7 @@ class FaceFrame(proto.Message): class FaceAnnotation(proto.Message): r"""Face annotation. + Attributes: thumbnail (bytes): Thumbnail of a representative face view (in @@ -416,6 +431,7 @@ class FaceAnnotation(proto.Message): class VideoAnnotationResults(proto.Message): r"""Annotation results for a single video. + Attributes: input_uri (str): Video file location in `Google Cloud @@ -486,6 +502,7 @@ class AnnotateVideoResponse(proto.Message): class VideoAnnotationProgress(proto.Message): r"""Annotation progress for a single video. 
+ Attributes: input_uri (str): Video file location in `Google Cloud diff --git a/google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/async_client.py b/google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/async_client.py index 5f3d922e..d67e7b6b 100644 --- a/google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/async_client.py +++ b/google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/async_client.py @@ -276,6 +276,12 @@ async def annotate_video( # Done; return the response. return response + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/client.py b/google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/client.py index 726abf60..cfd996f5 100644 --- a/google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/client.py +++ b/google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/client.py @@ -331,10 +331,7 @@ def __init__( client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, - always_use_jwt_access=( - Transport == type(self).get_transport_class("grpc") - or Transport == type(self).get_transport_class("grpc_asyncio") - ), + always_use_jwt_access=True, ) def annotate_video( @@ -438,6 +435,19 @@ def annotate_video( # Done; return the response. return response + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/transports/base.py b/google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/transports/base.py index e817294f..d15b43c1 100644 --- a/google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/transports/base.py +++ b/google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/transports/base.py @@ -173,6 +173,15 @@ def _prep_wrapped_messages(self, client_info): ), } + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + @property def operations_client(self) -> operations_v1.OperationsClient: """Return the client designed to process long-running operations.""" diff --git a/google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/transports/grpc.py b/google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/transports/grpc.py index d94be04b..fe7b4e6d 100644 --- a/google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/transports/grpc.py +++ b/google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/transports/grpc.py @@ -272,5 +272,8 @@ def annotate_video( ) return self._stubs["annotate_video"] + def close(self): + self.grpc_channel.close() + __all__ = ("VideoIntelligenceServiceGrpcTransport",) diff --git a/google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/transports/grpc_asyncio.py b/google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/transports/grpc_asyncio.py index f039e75f..0ebeb48e 100644 --- a/google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/transports/grpc_asyncio.py +++ b/google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/transports/grpc_asyncio.py @@ -279,5 +279,8 @@ def annotate_video( ) return self._stubs["annotate_video"] + def close(self): + return self.grpc_channel.close() + __all__ = ("VideoIntelligenceServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/videointelligence_v1p1beta1/types/video_intelligence.py b/google/cloud/videointelligence_v1p1beta1/types/video_intelligence.py index 7c529fdf..0fbbbc93 100644 --- a/google/cloud/videointelligence_v1p1beta1/types/video_intelligence.py +++ b/google/cloud/videointelligence_v1p1beta1/types/video_intelligence.py @@ -80,6 +80,7 @@ class Likelihood(proto.Enum): class AnnotateVideoRequest(proto.Message): r"""Video annotation request. + Attributes: input_uri (str): Input video location. Currently, only `Google Cloud @@ -131,6 +132,7 @@ class AnnotateVideoRequest(proto.Message): class VideoContext(proto.Message): r"""Video context and/or feature-specific parameters. + Attributes: segments (Sequence[google.cloud.videointelligence_v1p1beta1.types.VideoSegment]): Video segments to annotate. The segments may @@ -164,6 +166,7 @@ class VideoContext(proto.Message): class LabelDetectionConfig(proto.Message): r"""Config for LABEL_DETECTION. + Attributes: label_detection_mode (google.cloud.videointelligence_v1p1beta1.types.LabelDetectionMode): What labels should be detected with LABEL_DETECTION, in @@ -187,6 +190,7 @@ class LabelDetectionConfig(proto.Message): class ShotChangeDetectionConfig(proto.Message): r"""Config for SHOT_CHANGE_DETECTION. + Attributes: model (str): Model to use for shot change detection. @@ -199,6 +203,7 @@ class ShotChangeDetectionConfig(proto.Message): class ExplicitContentDetectionConfig(proto.Message): r"""Config for EXPLICIT_CONTENT_DETECTION. + Attributes: model (str): Model to use for explicit content detection. @@ -211,6 +216,7 @@ class ExplicitContentDetectionConfig(proto.Message): class VideoSegment(proto.Message): r"""Video segment. + Attributes: start_time_offset (google.protobuf.duration_pb2.Duration): Time-offset, relative to the beginning of the @@ -232,6 +238,7 @@ class VideoSegment(proto.Message): class LabelSegment(proto.Message): r"""Video segment level annotation results for label detection. 
+ Attributes: segment (google.cloud.videointelligence_v1p1beta1.types.VideoSegment): Video segment where a label was detected. @@ -245,6 +252,7 @@ class LabelSegment(proto.Message): class LabelFrame(proto.Message): r"""Video frame level annotation results for label detection. + Attributes: time_offset (google.protobuf.duration_pb2.Duration): Time-offset, relative to the beginning of the @@ -260,6 +268,7 @@ class LabelFrame(proto.Message): class Entity(proto.Message): r"""Detected entity from video analysis. + Attributes: entity_id (str): Opaque entity ID. Some IDs may be available in `Google @@ -278,6 +287,7 @@ class Entity(proto.Message): class LabelAnnotation(proto.Message): r"""Label annotation. + Attributes: entity (google.cloud.videointelligence_v1p1beta1.types.Entity): Detected entity. @@ -301,6 +311,7 @@ class LabelAnnotation(proto.Message): class ExplicitContentFrame(proto.Message): r"""Video frame level annotation results for explicit content. + Attributes: time_offset (google.protobuf.duration_pb2.Duration): Time-offset, relative to the beginning of the @@ -332,6 +343,7 @@ class ExplicitContentAnnotation(proto.Message): class VideoAnnotationResults(proto.Message): r"""Annotation results for a single video. + Attributes: input_uri (str): Output only. Video file location in `Google Cloud @@ -401,6 +413,7 @@ class AnnotateVideoResponse(proto.Message): class VideoAnnotationProgress(proto.Message): r"""Annotation progress for a single video. + Attributes: input_uri (str): Output only. Video file location in `Google Cloud @@ -440,6 +453,7 @@ class AnnotateVideoProgress(proto.Message): class SpeechTranscriptionConfig(proto.Message): r"""Config for SPEECH_TRANSCRIPTION. + Attributes: language_code (str): Required. *Required* The language of the supplied audio as a @@ -530,6 +544,7 @@ class SpeechTranscription(proto.Message): class SpeechRecognitionAlternative(proto.Message): r"""Alternative hypotheses (a.k.a. n-best list). + Attributes: transcript (str): Output only. Transcript text representing the diff --git a/google/cloud/videointelligence_v1p2beta1/services/video_intelligence_service/async_client.py b/google/cloud/videointelligence_v1p2beta1/services/video_intelligence_service/async_client.py index f3af55da..534fe915 100644 --- a/google/cloud/videointelligence_v1p2beta1/services/video_intelligence_service/async_client.py +++ b/google/cloud/videointelligence_v1p2beta1/services/video_intelligence_service/async_client.py @@ -276,6 +276,12 @@ async def annotate_video( # Done; return the response. 
return response + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/videointelligence_v1p2beta1/services/video_intelligence_service/client.py b/google/cloud/videointelligence_v1p2beta1/services/video_intelligence_service/client.py index 9fce56e7..f35a1aea 100644 --- a/google/cloud/videointelligence_v1p2beta1/services/video_intelligence_service/client.py +++ b/google/cloud/videointelligence_v1p2beta1/services/video_intelligence_service/client.py @@ -331,10 +331,7 @@ def __init__( client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, - always_use_jwt_access=( - Transport == type(self).get_transport_class("grpc") - or Transport == type(self).get_transport_class("grpc_asyncio") - ), + always_use_jwt_access=True, ) def annotate_video( @@ -438,6 +435,19 @@ def annotate_video( # Done; return the response. return response + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/videointelligence_v1p2beta1/services/video_intelligence_service/transports/base.py b/google/cloud/videointelligence_v1p2beta1/services/video_intelligence_service/transports/base.py index 486e90ba..42e62bde 100644 --- a/google/cloud/videointelligence_v1p2beta1/services/video_intelligence_service/transports/base.py +++ b/google/cloud/videointelligence_v1p2beta1/services/video_intelligence_service/transports/base.py @@ -173,6 +173,15 @@ def _prep_wrapped_messages(self, client_info): ), } + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + @property def operations_client(self) -> operations_v1.OperationsClient: """Return the client designed to process long-running operations.""" diff --git a/google/cloud/videointelligence_v1p2beta1/services/video_intelligence_service/transports/grpc.py b/google/cloud/videointelligence_v1p2beta1/services/video_intelligence_service/transports/grpc.py index 0e1f0166..e22650e6 100644 --- a/google/cloud/videointelligence_v1p2beta1/services/video_intelligence_service/transports/grpc.py +++ b/google/cloud/videointelligence_v1p2beta1/services/video_intelligence_service/transports/grpc.py @@ -272,5 +272,8 @@ def annotate_video( ) return self._stubs["annotate_video"] + def close(self): + self.grpc_channel.close() + __all__ = ("VideoIntelligenceServiceGrpcTransport",) diff --git a/google/cloud/videointelligence_v1p2beta1/services/video_intelligence_service/transports/grpc_asyncio.py b/google/cloud/videointelligence_v1p2beta1/services/video_intelligence_service/transports/grpc_asyncio.py index 61775ef4..71a69687 100644 --- a/google/cloud/videointelligence_v1p2beta1/services/video_intelligence_service/transports/grpc_asyncio.py +++ b/google/cloud/videointelligence_v1p2beta1/services/video_intelligence_service/transports/grpc_asyncio.py @@ -279,5 +279,8 @@ def annotate_video( ) return self._stubs["annotate_video"] + def close(self): + return self.grpc_channel.close() + __all__ = ("VideoIntelligenceServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/videointelligence_v1p2beta1/types/video_intelligence.py b/google/cloud/videointelligence_v1p2beta1/types/video_intelligence.py index 754ad23a..eca3b5a9 100644 --- a/google/cloud/videointelligence_v1p2beta1/types/video_intelligence.py +++ b/google/cloud/videointelligence_v1p2beta1/types/video_intelligence.py @@ -85,6 +85,7 @@ class Likelihood(proto.Enum): class AnnotateVideoRequest(proto.Message): r"""Video annotation request. + Attributes: input_uri (str): Input video location. Currently, only `Google Cloud @@ -136,6 +137,7 @@ class AnnotateVideoRequest(proto.Message): class VideoContext(proto.Message): r"""Video context and/or feature-specific parameters. + Attributes: segments (Sequence[google.cloud.videointelligence_v1p2beta1.types.VideoSegment]): Video segments to annotate. The segments may @@ -169,6 +171,7 @@ class VideoContext(proto.Message): class LabelDetectionConfig(proto.Message): r"""Config for LABEL_DETECTION. + Attributes: label_detection_mode (google.cloud.videointelligence_v1p2beta1.types.LabelDetectionMode): What labels should be detected with LABEL_DETECTION, in @@ -192,6 +195,7 @@ class LabelDetectionConfig(proto.Message): class ShotChangeDetectionConfig(proto.Message): r"""Config for SHOT_CHANGE_DETECTION. + Attributes: model (str): Model to use for shot change detection. @@ -204,6 +208,7 @@ class ShotChangeDetectionConfig(proto.Message): class ExplicitContentDetectionConfig(proto.Message): r"""Config for EXPLICIT_CONTENT_DETECTION. + Attributes: model (str): Model to use for explicit content detection. @@ -216,6 +221,7 @@ class ExplicitContentDetectionConfig(proto.Message): class TextDetectionConfig(proto.Message): r"""Config for TEXT_DETECTION. + Attributes: language_hints (Sequence[str]): Language hint can be specified if the @@ -233,6 +239,7 @@ class TextDetectionConfig(proto.Message): class VideoSegment(proto.Message): r"""Video segment. 
+ Attributes: start_time_offset (google.protobuf.duration_pb2.Duration): Time-offset, relative to the beginning of the @@ -254,6 +261,7 @@ class VideoSegment(proto.Message): class LabelSegment(proto.Message): r"""Video segment level annotation results for label detection. + Attributes: segment (google.cloud.videointelligence_v1p2beta1.types.VideoSegment): Video segment where a label was detected. @@ -267,6 +275,7 @@ class LabelSegment(proto.Message): class LabelFrame(proto.Message): r"""Video frame level annotation results for label detection. + Attributes: time_offset (google.protobuf.duration_pb2.Duration): Time-offset, relative to the beginning of the @@ -282,6 +291,7 @@ class LabelFrame(proto.Message): class Entity(proto.Message): r"""Detected entity from video analysis. + Attributes: entity_id (str): Opaque entity ID. Some IDs may be available in `Google @@ -300,6 +310,7 @@ class Entity(proto.Message): class LabelAnnotation(proto.Message): r"""Label annotation. + Attributes: entity (google.cloud.videointelligence_v1p2beta1.types.Entity): Detected entity. @@ -323,6 +334,7 @@ class LabelAnnotation(proto.Message): class ExplicitContentFrame(proto.Message): r"""Video frame level annotation results for explicit content. + Attributes: time_offset (google.protobuf.duration_pb2.Duration): Time-offset, relative to the beginning of the @@ -375,6 +387,7 @@ class NormalizedBoundingBox(proto.Message): class VideoAnnotationResults(proto.Message): r"""Annotation results for a single video. + Attributes: input_uri (str): Video file location in `Google Cloud @@ -453,6 +466,7 @@ class AnnotateVideoResponse(proto.Message): class VideoAnnotationProgress(proto.Message): r"""Annotation progress for a single video. + Attributes: input_uri (str): Video file location in `Google Cloud @@ -528,6 +542,7 @@ class NormalizedBoundingPoly(proto.Message): class TextSegment(proto.Message): r"""Video segment level annotation results for text detection. + Attributes: segment (google.cloud.videointelligence_v1p2beta1.types.VideoSegment): Video segment where a text snippet was @@ -603,6 +618,7 @@ class ObjectTrackingFrame(proto.Message): class ObjectTrackingAnnotation(proto.Message): r"""Annotations corresponding to one tracked object. + Attributes: entity (google.cloud.videointelligence_v1p2beta1.types.Entity): Entity to specify the object category that diff --git a/google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/async_client.py b/google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/async_client.py index 5b86c803..d4af4ad9 100644 --- a/google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/async_client.py +++ b/google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/async_client.py @@ -246,6 +246,12 @@ def streaming_annotate_video( # Done; return the response. 
return response + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/client.py b/google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/client.py index 430b8a91..ce9de815 100644 --- a/google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/client.py +++ b/google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/client.py @@ -339,10 +339,7 @@ def __init__( client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, - always_use_jwt_access=( - Transport == type(self).get_transport_class("grpc") - or Transport == type(self).get_transport_class("grpc_asyncio") - ), + always_use_jwt_access=True, ) def streaming_annotate_video( @@ -391,6 +388,19 @@ def streaming_annotate_video( # Done; return the response. return response + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/transports/base.py b/google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/transports/base.py index a7693b16..f9f28552 100644 --- a/google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/transports/base.py +++ b/google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/transports/base.py @@ -171,6 +171,15 @@ def _prep_wrapped_messages(self, client_info): ), } + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + @property def streaming_annotate_video( self, diff --git a/google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/transports/grpc.py b/google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/transports/grpc.py index 2380e158..238b8051 100644 --- a/google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/transports/grpc.py +++ b/google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/transports/grpc.py @@ -259,5 +259,8 @@ def streaming_annotate_video( ) return self._stubs["streaming_annotate_video"] + def close(self): + self.grpc_channel.close() + __all__ = ("StreamingVideoIntelligenceServiceGrpcTransport",) diff --git a/google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/transports/grpc_asyncio.py b/google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/transports/grpc_asyncio.py index 4de007f2..7978ef1e 100644 --- a/google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/transports/grpc_asyncio.py +++ b/google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/transports/grpc_asyncio.py @@ -262,5 +262,8 @@ def streaming_annotate_video( ) return self._stubs["streaming_annotate_video"] + def close(self): + return self.grpc_channel.close() + __all__ = ("StreamingVideoIntelligenceServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/videointelligence_v1p3beta1/services/video_intelligence_service/async_client.py b/google/cloud/videointelligence_v1p3beta1/services/video_intelligence_service/async_client.py index b9481b55..ed69072a 100644 --- a/google/cloud/videointelligence_v1p3beta1/services/video_intelligence_service/async_client.py +++ b/google/cloud/videointelligence_v1p3beta1/services/video_intelligence_service/async_client.py @@ -276,6 +276,12 @@ async def annotate_video( # Done; return the response. return response + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/videointelligence_v1p3beta1/services/video_intelligence_service/client.py b/google/cloud/videointelligence_v1p3beta1/services/video_intelligence_service/client.py index cae9d972..6ad5c32a 100644 --- a/google/cloud/videointelligence_v1p3beta1/services/video_intelligence_service/client.py +++ b/google/cloud/videointelligence_v1p3beta1/services/video_intelligence_service/client.py @@ -331,10 +331,7 @@ def __init__( client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, - always_use_jwt_access=( - Transport == type(self).get_transport_class("grpc") - or Transport == type(self).get_transport_class("grpc_asyncio") - ), + always_use_jwt_access=True, ) def annotate_video( @@ -438,6 +435,19 @@ def annotate_video( # Done; return the response. return response + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/videointelligence_v1p3beta1/services/video_intelligence_service/transports/base.py b/google/cloud/videointelligence_v1p3beta1/services/video_intelligence_service/transports/base.py index 64fa95a0..6b8e4121 100644 --- a/google/cloud/videointelligence_v1p3beta1/services/video_intelligence_service/transports/base.py +++ b/google/cloud/videointelligence_v1p3beta1/services/video_intelligence_service/transports/base.py @@ -173,6 +173,15 @@ def _prep_wrapped_messages(self, client_info): ), } + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + @property def operations_client(self) -> operations_v1.OperationsClient: """Return the client designed to process long-running operations.""" diff --git a/google/cloud/videointelligence_v1p3beta1/services/video_intelligence_service/transports/grpc.py b/google/cloud/videointelligence_v1p3beta1/services/video_intelligence_service/transports/grpc.py index 5b686dda..5f075362 100644 --- a/google/cloud/videointelligence_v1p3beta1/services/video_intelligence_service/transports/grpc.py +++ b/google/cloud/videointelligence_v1p3beta1/services/video_intelligence_service/transports/grpc.py @@ -272,5 +272,8 @@ def annotate_video( ) return self._stubs["annotate_video"] + def close(self): + self.grpc_channel.close() + __all__ = ("VideoIntelligenceServiceGrpcTransport",) diff --git a/google/cloud/videointelligence_v1p3beta1/services/video_intelligence_service/transports/grpc_asyncio.py b/google/cloud/videointelligence_v1p3beta1/services/video_intelligence_service/transports/grpc_asyncio.py index 8f0c5638..54f3ebc7 100644 --- a/google/cloud/videointelligence_v1p3beta1/services/video_intelligence_service/transports/grpc_asyncio.py +++ b/google/cloud/videointelligence_v1p3beta1/services/video_intelligence_service/transports/grpc_asyncio.py @@ -279,5 +279,8 @@ def annotate_video( ) return self._stubs["annotate_video"] + def close(self): + return self.grpc_channel.close() + __all__ = ("VideoIntelligenceServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/videointelligence_v1p3beta1/types/video_intelligence.py b/google/cloud/videointelligence_v1p3beta1/types/video_intelligence.py index fb3374ee..38e72d63 100644 --- a/google/cloud/videointelligence_v1p3beta1/types/video_intelligence.py +++ b/google/cloud/videointelligence_v1p3beta1/types/video_intelligence.py @@ -133,6 +133,7 @@ class Feature(proto.Enum): class AnnotateVideoRequest(proto.Message): r"""Video annotation request. + Attributes: input_uri (str): Input video location. Currently, only `Cloud @@ -183,6 +184,7 @@ class AnnotateVideoRequest(proto.Message): class VideoContext(proto.Message): r"""Video context and/or feature-specific parameters. + Attributes: segments (Sequence[google.cloud.videointelligence_v1p3beta1.types.VideoSegment]): Video segments to annotate. The segments may @@ -236,6 +238,7 @@ class VideoContext(proto.Message): class LabelDetectionConfig(proto.Message): r"""Config for LABEL_DETECTION. + Attributes: label_detection_mode (google.cloud.videointelligence_v1p3beta1.types.LabelDetectionMode): What labels should be detected with LABEL_DETECTION, in @@ -277,6 +280,7 @@ class LabelDetectionConfig(proto.Message): class ShotChangeDetectionConfig(proto.Message): r"""Config for SHOT_CHANGE_DETECTION. 
+ Attributes: model (str): Model to use for shot change detection. @@ -289,6 +293,7 @@ class ShotChangeDetectionConfig(proto.Message): class ObjectTrackingConfig(proto.Message): r"""Config for OBJECT_TRACKING. + Attributes: model (str): Model to use for object tracking. @@ -301,6 +306,7 @@ class ObjectTrackingConfig(proto.Message): class ExplicitContentDetectionConfig(proto.Message): r"""Config for EXPLICIT_CONTENT_DETECTION. + Attributes: model (str): Model to use for explicit content detection. @@ -313,6 +319,7 @@ class ExplicitContentDetectionConfig(proto.Message): class FaceDetectionConfig(proto.Message): r"""Config for FACE_DETECTION. + Attributes: model (str): Model to use for face detection. @@ -334,6 +341,7 @@ class FaceDetectionConfig(proto.Message): class PersonDetectionConfig(proto.Message): r"""Config for PERSON_DETECTION. + Attributes: include_bounding_boxes (bool): Whether bounding boxes are included in the @@ -355,6 +363,7 @@ class PersonDetectionConfig(proto.Message): class TextDetectionConfig(proto.Message): r"""Config for TEXT_DETECTION. + Attributes: language_hints (Sequence[str]): Language hint can be specified if the @@ -377,6 +386,7 @@ class TextDetectionConfig(proto.Message): class VideoSegment(proto.Message): r"""Video segment. + Attributes: start_time_offset (google.protobuf.duration_pb2.Duration): Time-offset, relative to the beginning of the @@ -398,6 +408,7 @@ class VideoSegment(proto.Message): class LabelSegment(proto.Message): r"""Video segment level annotation results for label detection. + Attributes: segment (google.cloud.videointelligence_v1p3beta1.types.VideoSegment): Video segment where a label was detected. @@ -411,6 +422,7 @@ class LabelSegment(proto.Message): class LabelFrame(proto.Message): r"""Video frame level annotation results for label detection. + Attributes: time_offset (google.protobuf.duration_pb2.Duration): Time-offset, relative to the beginning of the @@ -426,6 +438,7 @@ class LabelFrame(proto.Message): class Entity(proto.Message): r"""Detected entity from video analysis. + Attributes: entity_id (str): Opaque entity ID. Some IDs may be available in `Google @@ -444,6 +457,7 @@ class Entity(proto.Message): class LabelAnnotation(proto.Message): r"""Label annotation. + Attributes: entity (google.cloud.videointelligence_v1p3beta1.types.Entity): Detected entity. @@ -467,6 +481,7 @@ class LabelAnnotation(proto.Message): class ExplicitContentFrame(proto.Message): r"""Video frame level annotation results for explicit content. + Attributes: time_offset (google.protobuf.duration_pb2.Duration): Time-offset, relative to the beginning of the @@ -550,6 +565,7 @@ class TimestampedObject(proto.Message): class Track(proto.Message): r"""A track of an object instance. + Attributes: segment (google.cloud.videointelligence_v1p3beta1.types.VideoSegment): Video segment of a track. @@ -597,6 +613,7 @@ class DetectedAttribute(proto.Message): class Celebrity(proto.Message): r"""Celebrity definition. + Attributes: name (str): The resource name of the celebrity. Have the format @@ -630,6 +647,7 @@ class CelebrityTrack(proto.Message): class RecognizedCelebrity(proto.Message): r"""The recognized celebrity with confidence score. + Attributes: celebrity (google.cloud.videointelligence_v1p3beta1.types.Celebrity): The recognized celebrity. @@ -648,6 +666,7 @@ class RecognizedCelebrity(proto.Message): class CelebrityRecognitionAnnotation(proto.Message): r"""Celebrity recognition annotation per video. 
+ Attributes: celebrity_tracks (Sequence[google.cloud.videointelligence_v1p3beta1.types.CelebrityTrack]): The tracks detected from the input video, @@ -684,6 +703,7 @@ class DetectedLandmark(proto.Message): class FaceDetectionAnnotation(proto.Message): r"""Face detection annotation. + Attributes: tracks (Sequence[google.cloud.videointelligence_v1p3beta1.types.Track]): The face tracks with attributes. @@ -697,6 +717,7 @@ class FaceDetectionAnnotation(proto.Message): class PersonDetectionAnnotation(proto.Message): r"""Person detection annotation per video. + Attributes: tracks (Sequence[google.cloud.videointelligence_v1p3beta1.types.Track]): The detected tracks of a person. @@ -707,6 +728,7 @@ class PersonDetectionAnnotation(proto.Message): class VideoAnnotationResults(proto.Message): r"""Annotation results for a single video. + Attributes: input_uri (str): Video file location in `Cloud @@ -837,6 +859,7 @@ class AnnotateVideoResponse(proto.Message): class VideoAnnotationProgress(proto.Message): r"""Annotation progress for a single video. + Attributes: input_uri (str): Video file location in `Cloud @@ -882,6 +905,7 @@ class AnnotateVideoProgress(proto.Message): class SpeechTranscriptionConfig(proto.Message): r"""Config for SPEECH_TRANSCRIPTION. + Attributes: language_code (str): Required. *Required* The language of the supplied audio as a @@ -1000,6 +1024,7 @@ class SpeechTranscription(proto.Message): class SpeechRecognitionAlternative(proto.Message): r"""Alternative hypotheses (a.k.a. n-best list). + Attributes: transcript (str): Transcript text representing the words that @@ -1108,6 +1133,7 @@ class NormalizedBoundingPoly(proto.Message): class TextSegment(proto.Message): r"""Video segment level annotation results for text detection. + Attributes: segment (google.cloud.videointelligence_v1p3beta1.types.VideoSegment): Video segment where a text snippet was @@ -1183,6 +1209,7 @@ class ObjectTrackingFrame(proto.Message): class ObjectTrackingAnnotation(proto.Message): r"""Annotations corresponding to one tracked object. + Attributes: segment (google.cloud.videointelligence_v1p3beta1.types.VideoSegment): Non-streaming batch mode ONLY. @@ -1410,11 +1437,13 @@ class StreamingVideoAnnotationResults(proto.Message): class StreamingShotChangeDetectionConfig(proto.Message): - r"""Config for STREAMING_SHOT_CHANGE_DETECTION. """ + r"""Config for STREAMING_SHOT_CHANGE_DETECTION. + """ class StreamingLabelDetectionConfig(proto.Message): r"""Config for STREAMING_LABEL_DETECTION. + Attributes: stationary_camera (bool): Whether the video has been captured from a @@ -1427,15 +1456,18 @@ class StreamingLabelDetectionConfig(proto.Message): class StreamingExplicitContentDetectionConfig(proto.Message): - r"""Config for STREAMING_EXPLICIT_CONTENT_DETECTION. """ + r"""Config for STREAMING_EXPLICIT_CONTENT_DETECTION. + """ class StreamingObjectTrackingConfig(proto.Message): - r"""Config for STREAMING_OBJECT_TRACKING. """ + r"""Config for STREAMING_OBJECT_TRACKING. + """ class StreamingAutomlActionRecognitionConfig(proto.Message): r"""Config for STREAMING_AUTOML_ACTION_RECOGNITION. + Attributes: model_name (str): Resource name of AutoML model. Format: @@ -1447,6 +1479,7 @@ class StreamingAutomlActionRecognitionConfig(proto.Message): class StreamingAutomlClassificationConfig(proto.Message): r"""Config for STREAMING_AUTOML_CLASSIFICATION. + Attributes: model_name (str): Resource name of AutoML model. 
Format:
@@ -1458,6 +1491,7 @@ class StreamingAutomlClassificationConfig(proto.Message):
 
 class StreamingAutomlObjectTrackingConfig(proto.Message):
     r"""Config for STREAMING_AUTOML_OBJECT_TRACKING.
+
     Attributes:
         model_name (str):
             Resource name of AutoML model. Format:
@@ -1469,6 +1503,7 @@ class StreamingAutomlObjectTrackingConfig(proto.Message):
 
 class StreamingStorageConfig(proto.Message):
     r"""Config for streaming storage option.
+
     Attributes:
         enable_storage_annotation_result (bool):
             Enable streaming storage. Default: false.
diff --git a/tests/unit/gapic/videointelligence_v1/test_video_intelligence_service.py b/tests/unit/gapic/videointelligence_v1/test_video_intelligence_service.py
index 8dcc1cc8..ab317273 100644
--- a/tests/unit/gapic/videointelligence_v1/test_video_intelligence_service.py
+++ b/tests/unit/gapic/videointelligence_v1/test_video_intelligence_service.py
@@ -32,6 +32,7 @@
 from google.api_core import grpc_helpers_async
 from google.api_core import operation_async  # type: ignore
 from google.api_core import operations_v1
+from google.api_core import path_template
 from google.auth import credentials as ga_credentials
 from google.auth.exceptions import MutualTLSChannelError
 from google.cloud.videointelligence_v1.services.video_intelligence_service import (
@@ -802,6 +803,9 @@ def test_video_intelligence_service_base_transport():
         with pytest.raises(NotImplementedError):
             getattr(transport, method)(request=object())
 
+    with pytest.raises(NotImplementedError):
+        transport.close()
+
     # Additionally, the LRO client (a property) should
     # also raise NotImplementedError
     with pytest.raises(NotImplementedError):
@@ -1295,3 +1299,49 @@ def test_client_withDEFAULT_CLIENT_INFO():
             credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
         )
         prep.assert_called_once_with(client_info)
+
+
+@pytest.mark.asyncio
+async def test_transport_close_async():
+    client = VideoIntelligenceServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
+    )
+    with mock.patch.object(
+        type(getattr(client.transport, "grpc_channel")), "close"
+    ) as close:
+        async with client:
+            close.assert_not_called()
+        close.assert_called_once()
+
+
+def test_transport_close():
+    transports = {
+        "grpc": "_grpc_channel",
+    }
+
+    for transport, close_name in transports.items():
+        client = VideoIntelligenceServiceClient(
+            credentials=ga_credentials.AnonymousCredentials(), transport=transport
+        )
+        with mock.patch.object(
+            type(getattr(client.transport, close_name)), "close"
+        ) as close:
+            with client:
+                close.assert_not_called()
+            close.assert_called_once()
+
+
+def test_client_ctx():
+    transports = [
+        "grpc",
+    ]
+    for transport in transports:
+        client = VideoIntelligenceServiceClient(
+            credentials=ga_credentials.AnonymousCredentials(), transport=transport
+        )
+        # Test client calls underlying transport.
+        with mock.patch.object(type(client.transport), "close") as close:
+            close.assert_not_called()
+            with client:
+                pass
+            close.assert_called()
diff --git a/tests/unit/gapic/videointelligence_v1beta2/test_video_intelligence_service.py b/tests/unit/gapic/videointelligence_v1beta2/test_video_intelligence_service.py
index e70020d6..a912f431 100644
--- a/tests/unit/gapic/videointelligence_v1beta2/test_video_intelligence_service.py
+++ b/tests/unit/gapic/videointelligence_v1beta2/test_video_intelligence_service.py
@@ -32,6 +32,7 @@
 from google.api_core import grpc_helpers_async
 from google.api_core import operation_async  # type: ignore
 from google.api_core import operations_v1
+from google.api_core import path_template
 from google.auth import credentials as ga_credentials
 from google.auth.exceptions import MutualTLSChannelError
 from google.cloud.videointelligence_v1beta2.services.video_intelligence_service import (
@@ -802,6 +803,9 @@ def test_video_intelligence_service_base_transport():
         with pytest.raises(NotImplementedError):
             getattr(transport, method)(request=object())
 
+    with pytest.raises(NotImplementedError):
+        transport.close()
+
     # Additionally, the LRO client (a property) should
     # also raise NotImplementedError
     with pytest.raises(NotImplementedError):
@@ -1295,3 +1299,49 @@ def test_client_withDEFAULT_CLIENT_INFO():
             credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
         )
         prep.assert_called_once_with(client_info)
+
+
+@pytest.mark.asyncio
+async def test_transport_close_async():
+    client = VideoIntelligenceServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
+    )
+    with mock.patch.object(
+        type(getattr(client.transport, "grpc_channel")), "close"
+    ) as close:
+        async with client:
+            close.assert_not_called()
+        close.assert_called_once()
+
+
+def test_transport_close():
+    transports = {
+        "grpc": "_grpc_channel",
+    }
+
+    for transport, close_name in transports.items():
+        client = VideoIntelligenceServiceClient(
+            credentials=ga_credentials.AnonymousCredentials(), transport=transport
+        )
+        with mock.patch.object(
+            type(getattr(client.transport, close_name)), "close"
+        ) as close:
+            with client:
+                close.assert_not_called()
+            close.assert_called_once()
+
+
+def test_client_ctx():
+    transports = [
+        "grpc",
+    ]
+    for transport in transports:
+        client = VideoIntelligenceServiceClient(
+            credentials=ga_credentials.AnonymousCredentials(), transport=transport
+        )
+        # Test client calls underlying transport.
+        with mock.patch.object(type(client.transport), "close") as close:
+            close.assert_not_called()
+            with client:
+                pass
+            close.assert_called()
diff --git a/tests/unit/gapic/videointelligence_v1p1beta1/test_video_intelligence_service.py b/tests/unit/gapic/videointelligence_v1p1beta1/test_video_intelligence_service.py
index 976c799d..6d7e3060 100644
--- a/tests/unit/gapic/videointelligence_v1p1beta1/test_video_intelligence_service.py
+++ b/tests/unit/gapic/videointelligence_v1p1beta1/test_video_intelligence_service.py
@@ -32,6 +32,7 @@
 from google.api_core import grpc_helpers_async
 from google.api_core import operation_async  # type: ignore
 from google.api_core import operations_v1
+from google.api_core import path_template
 from google.auth import credentials as ga_credentials
 from google.auth.exceptions import MutualTLSChannelError
 from google.cloud.videointelligence_v1p1beta1.services.video_intelligence_service import (
@@ -802,6 +803,9 @@ def test_video_intelligence_service_base_transport():
         with pytest.raises(NotImplementedError):
             getattr(transport, method)(request=object())
 
+    with pytest.raises(NotImplementedError):
+        transport.close()
+
     # Additionally, the LRO client (a property) should
     # also raise NotImplementedError
     with pytest.raises(NotImplementedError):
@@ -1295,3 +1299,49 @@ def test_client_withDEFAULT_CLIENT_INFO():
             credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
         )
         prep.assert_called_once_with(client_info)
+
+
+@pytest.mark.asyncio
+async def test_transport_close_async():
+    client = VideoIntelligenceServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
+    )
+    with mock.patch.object(
+        type(getattr(client.transport, "grpc_channel")), "close"
+    ) as close:
+        async with client:
+            close.assert_not_called()
+        close.assert_called_once()
+
+
+def test_transport_close():
+    transports = {
+        "grpc": "_grpc_channel",
+    }
+
+    for transport, close_name in transports.items():
+        client = VideoIntelligenceServiceClient(
+            credentials=ga_credentials.AnonymousCredentials(), transport=transport
+        )
+        with mock.patch.object(
+            type(getattr(client.transport, close_name)), "close"
+        ) as close:
+            with client:
+                close.assert_not_called()
+            close.assert_called_once()
+
+
+def test_client_ctx():
+    transports = [
+        "grpc",
+    ]
+    for transport in transports:
+        client = VideoIntelligenceServiceClient(
+            credentials=ga_credentials.AnonymousCredentials(), transport=transport
+        )
+        # Test client calls underlying transport.
+        with mock.patch.object(type(client.transport), "close") as close:
+            close.assert_not_called()
+            with client:
+                pass
+            close.assert_called()
diff --git a/tests/unit/gapic/videointelligence_v1p2beta1/test_video_intelligence_service.py b/tests/unit/gapic/videointelligence_v1p2beta1/test_video_intelligence_service.py
index 58987fce..7a0470bc 100644
--- a/tests/unit/gapic/videointelligence_v1p2beta1/test_video_intelligence_service.py
+++ b/tests/unit/gapic/videointelligence_v1p2beta1/test_video_intelligence_service.py
@@ -32,6 +32,7 @@
 from google.api_core import grpc_helpers_async
 from google.api_core import operation_async  # type: ignore
 from google.api_core import operations_v1
+from google.api_core import path_template
 from google.auth import credentials as ga_credentials
 from google.auth.exceptions import MutualTLSChannelError
 from google.cloud.videointelligence_v1p2beta1.services.video_intelligence_service import (
@@ -802,6 +803,9 @@ def test_video_intelligence_service_base_transport():
         with pytest.raises(NotImplementedError):
             getattr(transport, method)(request=object())
 
+    with pytest.raises(NotImplementedError):
+        transport.close()
+
     # Additionally, the LRO client (a property) should
     # also raise NotImplementedError
     with pytest.raises(NotImplementedError):
@@ -1295,3 +1299,49 @@ def test_client_withDEFAULT_CLIENT_INFO():
             credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
         )
         prep.assert_called_once_with(client_info)
+
+
+@pytest.mark.asyncio
+async def test_transport_close_async():
+    client = VideoIntelligenceServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
+    )
+    with mock.patch.object(
+        type(getattr(client.transport, "grpc_channel")), "close"
+    ) as close:
+        async with client:
+            close.assert_not_called()
+        close.assert_called_once()
+
+
+def test_transport_close():
+    transports = {
+        "grpc": "_grpc_channel",
+    }
+
+    for transport, close_name in transports.items():
+        client = VideoIntelligenceServiceClient(
+            credentials=ga_credentials.AnonymousCredentials(), transport=transport
+        )
+        with mock.patch.object(
+            type(getattr(client.transport, close_name)), "close"
+        ) as close:
+            with client:
+                close.assert_not_called()
+            close.assert_called_once()
+
+
+def test_client_ctx():
+    transports = [
+        "grpc",
+    ]
+    for transport in transports:
+        client = VideoIntelligenceServiceClient(
+            credentials=ga_credentials.AnonymousCredentials(), transport=transport
+        )
+        # Test client calls underlying transport.
+        with mock.patch.object(type(client.transport), "close") as close:
+            close.assert_not_called()
+            with client:
+                pass
+            close.assert_called()
diff --git a/tests/unit/gapic/videointelligence_v1p3beta1/test_streaming_video_intelligence_service.py b/tests/unit/gapic/videointelligence_v1p3beta1/test_streaming_video_intelligence_service.py
index 714b6439..20eb15fa 100644
--- a/tests/unit/gapic/videointelligence_v1p3beta1/test_streaming_video_intelligence_service.py
+++ b/tests/unit/gapic/videointelligence_v1p3beta1/test_streaming_video_intelligence_service.py
@@ -29,6 +29,7 @@
 from google.api_core import gapic_v1
 from google.api_core import grpc_helpers
 from google.api_core import grpc_helpers_async
+from google.api_core import path_template
 from google.auth import credentials as ga_credentials
 from google.auth.exceptions import MutualTLSChannelError
 from google.cloud.videointelligence_v1p3beta1.services.streaming_video_intelligence_service import (
@@ -732,6 +733,9 @@ def test_streaming_video_intelligence_service_base_transport():
         with pytest.raises(NotImplementedError):
             getattr(transport, method)(request=object())
 
+    with pytest.raises(NotImplementedError):
+        transport.close()
+
 
 @requires_google_auth_gte_1_25_0
 def test_streaming_video_intelligence_service_base_transport_with_credentials_file():
@@ -1213,3 +1217,49 @@ def test_client_withDEFAULT_CLIENT_INFO():
             credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
         )
         prep.assert_called_once_with(client_info)
+
+
+@pytest.mark.asyncio
+async def test_transport_close_async():
+    client = StreamingVideoIntelligenceServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
+    )
+    with mock.patch.object(
+        type(getattr(client.transport, "grpc_channel")), "close"
+    ) as close:
+        async with client:
+            close.assert_not_called()
+        close.assert_called_once()
+
+
+def test_transport_close():
+    transports = {
+        "grpc": "_grpc_channel",
+    }
+
+    for transport, close_name in transports.items():
+        client = StreamingVideoIntelligenceServiceClient(
+            credentials=ga_credentials.AnonymousCredentials(), transport=transport
+        )
+        with mock.patch.object(
+            type(getattr(client.transport, close_name)), "close"
+        ) as close:
+            with client:
+                close.assert_not_called()
+            close.assert_called_once()
+
+
+def test_client_ctx():
+    transports = [
+        "grpc",
+    ]
+    for transport in transports:
+        client = StreamingVideoIntelligenceServiceClient(
+            credentials=ga_credentials.AnonymousCredentials(), transport=transport
+        )
+        # Test client calls underlying transport.
+        with mock.patch.object(type(client.transport), "close") as close:
+            close.assert_not_called()
+            with client:
+                pass
+            close.assert_called()
diff --git a/tests/unit/gapic/videointelligence_v1p3beta1/test_video_intelligence_service.py b/tests/unit/gapic/videointelligence_v1p3beta1/test_video_intelligence_service.py
index f20fdab8..e5448040 100644
--- a/tests/unit/gapic/videointelligence_v1p3beta1/test_video_intelligence_service.py
+++ b/tests/unit/gapic/videointelligence_v1p3beta1/test_video_intelligence_service.py
@@ -32,6 +32,7 @@
 from google.api_core import grpc_helpers_async
 from google.api_core import operation_async  # type: ignore
 from google.api_core import operations_v1
+from google.api_core import path_template
 from google.auth import credentials as ga_credentials
 from google.auth.exceptions import MutualTLSChannelError
 from google.cloud.videointelligence_v1p3beta1.services.video_intelligence_service import (
@@ -802,6 +803,9 @@ def test_video_intelligence_service_base_transport():
         with pytest.raises(NotImplementedError):
             getattr(transport, method)(request=object())
 
+    with pytest.raises(NotImplementedError):
+        transport.close()
+
     # Additionally, the LRO client (a property) should
     # also raise NotImplementedError
     with pytest.raises(NotImplementedError):
@@ -1295,3 +1299,49 @@ def test_client_withDEFAULT_CLIENT_INFO():
             credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
         )
         prep.assert_called_once_with(client_info)
+
+
+@pytest.mark.asyncio
+async def test_transport_close_async():
+    client = VideoIntelligenceServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
+    )
+    with mock.patch.object(
+        type(getattr(client.transport, "grpc_channel")), "close"
+    ) as close:
+        async with client:
+            close.assert_not_called()
+        close.assert_called_once()
+
+
+def test_transport_close():
+    transports = {
+        "grpc": "_grpc_channel",
+    }
+
+    for transport, close_name in transports.items():
+        client = VideoIntelligenceServiceClient(
+            credentials=ga_credentials.AnonymousCredentials(), transport=transport
+        )
+        with mock.patch.object(
+            type(getattr(client.transport, close_name)), "close"
+        ) as close:
+            with client:
+                close.assert_not_called()
+            close.assert_called_once()
+
+
+def test_client_ctx():
+    transports = [
+        "grpc",
+    ]
+    for transport in transports:
+        client = VideoIntelligenceServiceClient(
+            credentials=ga_credentials.AnonymousCredentials(), transport=transport
+        )
+        # Test client calls underlying transport.
+        with mock.patch.object(type(client.transport), "close") as close:
+            close.assert_not_called()
+            with client:
+                pass
+            close.assert_called()
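
The changes above add context-manager support to every generated client: leaving the "with" (or "async with") block calls transport.close(), which closes the underlying gRPC channel. A minimal usage sketch of the new pattern, assuming application default credentials and a placeholder gs:// input URI (neither is part of this diff):

    from google.cloud import videointelligence_v1

    # Exiting the "with" block calls transport.close() and shuts down the
    # client's gRPC channel, so no explicit cleanup is needed afterwards.
    with videointelligence_v1.VideoIntelligenceServiceClient() as client:
        operation = client.annotate_video(
            request={
                "input_uri": "gs://my-bucket/my-video.mp4",  # placeholder URI
                "features": [videointelligence_v1.Feature.LABEL_DETECTION],
            }
        )
        result = operation.result(timeout=300)

    # Do not use the client as a context manager if its transport is shared
    # with other clients: closing it here would break those clients as well.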