Skip to content
This repository has been archived by the owner on Nov 29, 2023. It is now read-only.

Commit

Permalink
feat: add context manager support in client (#229)
Browse files Browse the repository at this point in the history
- [ ] Regenerate this pull request now.

chore: fix docstring for first attribute of protos

committer: @busunkim96
PiperOrigin-RevId: 401271153

Source-Link: googleapis/googleapis@787f8c9

Source-Link: googleapis/googleapis-gen@81decff
Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiODFkZWNmZmU5ZmM3MjM5NmE4MTUzZTc1NmQxZDY3YTZlZWNmZDYyMCJ9
  • Loading branch information
gcf-owl-bot[bot] committed Oct 7, 2021
1 parent e490fb8 commit ac75850
Show file tree
Hide file tree
Showing 41 changed files with 623 additions and 27 deletions.
Expand Up @@ -276,6 +276,12 @@ async def annotate_video(
# Done; return the response.
return response

async def __aenter__(self):
    """Enters the async context manager and returns the client itself."""
    return self

async def __aexit__(self, exc_type, exc, tb):
    """Exits the async context manager, closing the underlying transport.

    .. warning::
        ONLY use as a context manager if the transport is NOT shared
        with other clients! Exiting the with block will CLOSE the
        transport and may cause errors in other clients!
    """
    await self.transport.close()


try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
Expand Down
Expand Up @@ -331,10 +331,7 @@ def __init__(
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=(
Transport == type(self).get_transport_class("grpc")
or Transport == type(self).get_transport_class("grpc_asyncio")
),
always_use_jwt_access=True,
)

def annotate_video(
Expand Down Expand Up @@ -438,6 +435,19 @@ def annotate_video(
# Done; return the response.
return response

def __enter__(self):
    """Enters the context manager and returns the client itself."""
    return self

def __exit__(self, exc_type, exc_value, exc_tb):
    """Releases underlying transport's resources.

    The interpreter always invokes ``__exit__`` positionally, so renaming
    the parameters (the original shadowed the ``type`` builtin) is safe
    for callers.

    .. warning::
        ONLY use as a context manager if the transport is NOT shared
        with other clients! Exiting the with block will CLOSE the transport
        and may cause errors in other clients!
    """
    self.transport.close()


try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
Expand Down
Expand Up @@ -173,6 +173,15 @@ def _prep_wrapped_messages(self, client_info):
),
}

def close(self):
    """Closes resources associated with the transport.

    .. warning::
        Only call this method if the transport is NOT shared
        with other clients - this may cause errors in other clients!

    Raises:
        NotImplementedError: Always; concrete transport subclasses
            must override this method.
    """
    raise NotImplementedError()

@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Return the client designed to process long-running operations."""
Expand Down
Expand Up @@ -272,5 +272,8 @@ def annotate_video(
)
return self._stubs["annotate_video"]

def close(self):
    """Closes the transport's underlying gRPC channel.

    .. warning::
        Only call this if the transport is not shared with other
        clients; closing the channel affects every client using it.
    """
    channel = self.grpc_channel
    channel.close()


__all__ = ("VideoIntelligenceServiceGrpcTransport",)
Expand Up @@ -279,5 +279,8 @@ def annotate_video(
)
return self._stubs["annotate_video"]

def close(self):
    """Closes the transport's underlying gRPC channel.

    Returns whatever ``grpc_channel.close()`` returns, so async callers
    can await the result.
    """
    channel = self.grpc_channel
    return channel.close()


__all__ = ("VideoIntelligenceServiceGrpcAsyncIOTransport",)
27 changes: 27 additions & 0 deletions google/cloud/videointelligence_v1/types/video_intelligence.py
Expand Up @@ -107,6 +107,7 @@ class Likelihood(proto.Enum):

class AnnotateVideoRequest(proto.Message):
r"""Video annotation request.
Attributes:
input_uri (str):
Input video location. Currently, only `Cloud
Expand Down Expand Up @@ -157,6 +158,7 @@ class AnnotateVideoRequest(proto.Message):

class VideoContext(proto.Message):
r"""Video context and/or feature-specific parameters.
Attributes:
segments (Sequence[google.cloud.videointelligence_v1.types.VideoSegment]):
Video segments to annotate. The segments may
Expand Down Expand Up @@ -210,6 +212,7 @@ class VideoContext(proto.Message):

class LabelDetectionConfig(proto.Message):
r"""Config for LABEL_DETECTION.
Attributes:
label_detection_mode (google.cloud.videointelligence_v1.types.LabelDetectionMode):
What labels should be detected with LABEL_DETECTION, in
Expand Down Expand Up @@ -251,6 +254,7 @@ class LabelDetectionConfig(proto.Message):

class ShotChangeDetectionConfig(proto.Message):
r"""Config for SHOT_CHANGE_DETECTION.
Attributes:
model (str):
Model to use for shot change detection.
Expand All @@ -263,6 +267,7 @@ class ShotChangeDetectionConfig(proto.Message):

class ObjectTrackingConfig(proto.Message):
r"""Config for OBJECT_TRACKING.
Attributes:
model (str):
Model to use for object tracking.
Expand All @@ -275,6 +280,7 @@ class ObjectTrackingConfig(proto.Message):

class FaceDetectionConfig(proto.Message):
r"""Config for FACE_DETECTION.
Attributes:
model (str):
Model to use for face detection.
Expand All @@ -296,6 +302,7 @@ class FaceDetectionConfig(proto.Message):

class PersonDetectionConfig(proto.Message):
r"""Config for PERSON_DETECTION.
Attributes:
include_bounding_boxes (bool):
Whether bounding boxes are included in the
Expand All @@ -317,6 +324,7 @@ class PersonDetectionConfig(proto.Message):

class ExplicitContentDetectionConfig(proto.Message):
r"""Config for EXPLICIT_CONTENT_DETECTION.
Attributes:
model (str):
Model to use for explicit content detection.
Expand All @@ -329,6 +337,7 @@ class ExplicitContentDetectionConfig(proto.Message):

class TextDetectionConfig(proto.Message):
r"""Config for TEXT_DETECTION.
Attributes:
language_hints (Sequence[str]):
Language hint can be specified if the
Expand All @@ -351,6 +360,7 @@ class TextDetectionConfig(proto.Message):

class VideoSegment(proto.Message):
r"""Video segment.
Attributes:
start_time_offset (google.protobuf.duration_pb2.Duration):
Time-offset, relative to the beginning of the
Expand All @@ -372,6 +382,7 @@ class VideoSegment(proto.Message):

class LabelSegment(proto.Message):
r"""Video segment level annotation results for label detection.
Attributes:
segment (google.cloud.videointelligence_v1.types.VideoSegment):
Video segment where a label was detected.
Expand All @@ -385,6 +396,7 @@ class LabelSegment(proto.Message):

class LabelFrame(proto.Message):
r"""Video frame level annotation results for label detection.
Attributes:
time_offset (google.protobuf.duration_pb2.Duration):
Time-offset, relative to the beginning of the
Expand All @@ -400,6 +412,7 @@ class LabelFrame(proto.Message):

class Entity(proto.Message):
r"""Detected entity from video analysis.
Attributes:
entity_id (str):
Opaque entity ID. Some IDs may be available in `Google
Expand All @@ -418,6 +431,7 @@ class Entity(proto.Message):

class LabelAnnotation(proto.Message):
r"""Label annotation.
Attributes:
entity (google.cloud.videointelligence_v1.types.Entity):
Detected entity.
Expand All @@ -444,6 +458,7 @@ class LabelAnnotation(proto.Message):

class ExplicitContentFrame(proto.Message):
r"""Video frame level annotation results for explicit content.
Attributes:
time_offset (google.protobuf.duration_pb2.Duration):
Time-offset, relative to the beginning of the
Expand Down Expand Up @@ -499,6 +514,7 @@ class NormalizedBoundingBox(proto.Message):

class FaceDetectionAnnotation(proto.Message):
r"""Face detection annotation.
Attributes:
tracks (Sequence[google.cloud.videointelligence_v1.types.Track]):
The face tracks with attributes.
Expand All @@ -515,6 +531,7 @@ class FaceDetectionAnnotation(proto.Message):

class PersonDetectionAnnotation(proto.Message):
r"""Person detection annotation per video.
Attributes:
tracks (Sequence[google.cloud.videointelligence_v1.types.Track]):
The detected tracks of a person.
Expand All @@ -528,6 +545,7 @@ class PersonDetectionAnnotation(proto.Message):

class FaceSegment(proto.Message):
r"""Video segment level annotation results for face detection.
Attributes:
segment (google.cloud.videointelligence_v1.types.VideoSegment):
Video segment where a face was detected.
Expand All @@ -538,6 +556,7 @@ class FaceSegment(proto.Message):

class FaceFrame(proto.Message):
r"""Deprecated. No effect.
Attributes:
normalized_bounding_boxes (Sequence[google.cloud.videointelligence_v1.types.NormalizedBoundingBox]):
Normalized Bounding boxes in a frame.
Expand All @@ -558,6 +577,7 @@ class FaceFrame(proto.Message):

class FaceAnnotation(proto.Message):
r"""Deprecated. No effect.
Attributes:
thumbnail (bytes):
Thumbnail of a representative face view (in
Expand Down Expand Up @@ -606,6 +626,7 @@ class TimestampedObject(proto.Message):

class Track(proto.Message):
r"""A track of an object instance.
Attributes:
segment (google.cloud.videointelligence_v1.types.VideoSegment):
Video segment of a track.
Expand Down Expand Up @@ -675,6 +696,7 @@ class DetectedLandmark(proto.Message):

class VideoAnnotationResults(proto.Message):
r"""Annotation results for a single video.
Attributes:
input_uri (str):
Video file location in `Cloud
Expand Down Expand Up @@ -806,6 +828,7 @@ class AnnotateVideoResponse(proto.Message):

class VideoAnnotationProgress(proto.Message):
r"""Annotation progress for a single video.
Attributes:
input_uri (str):
Video file location in `Cloud
Expand Down Expand Up @@ -851,6 +874,7 @@ class AnnotateVideoProgress(proto.Message):

class SpeechTranscriptionConfig(proto.Message):
r"""Config for SPEECH_TRANSCRIPTION.
Attributes:
language_code (str):
Required. *Required* The language of the supplied audio as a
Expand Down Expand Up @@ -969,6 +993,7 @@ class SpeechTranscription(proto.Message):

class SpeechRecognitionAlternative(proto.Message):
r"""Alternative hypotheses (a.k.a. n-best list).
Attributes:
transcript (str):
Transcript text representing the words that
Expand Down Expand Up @@ -1077,6 +1102,7 @@ class NormalizedBoundingPoly(proto.Message):

class TextSegment(proto.Message):
r"""Video segment level annotation results for text detection.
Attributes:
segment (google.cloud.videointelligence_v1.types.VideoSegment):
Video segment where a text snippet was
Expand Down Expand Up @@ -1155,6 +1181,7 @@ class ObjectTrackingFrame(proto.Message):

class ObjectTrackingAnnotation(proto.Message):
r"""Annotations corresponding to one tracked object.
Attributes:
segment (google.cloud.videointelligence_v1.types.VideoSegment):
Non-streaming batch mode ONLY.
Expand Down
Expand Up @@ -276,6 +276,12 @@ async def annotate_video(
# Done; return the response.
return response

async def __aenter__(self):
    """Enters the async context manager and returns the client itself."""
    return self

async def __aexit__(self, exc_type, exc, tb):
    """Exits the async context manager, closing the underlying transport.

    .. warning::
        ONLY use as a context manager if the transport is NOT shared
        with other clients! Exiting the with block will CLOSE the
        transport and may cause errors in other clients!
    """
    await self.transport.close()


try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
Expand Down
Expand Up @@ -331,10 +331,7 @@ def __init__(
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=(
Transport == type(self).get_transport_class("grpc")
or Transport == type(self).get_transport_class("grpc_asyncio")
),
always_use_jwt_access=True,
)

def annotate_video(
Expand Down Expand Up @@ -438,6 +435,19 @@ def annotate_video(
# Done; return the response.
return response

def __enter__(self):
    """Enters the context manager and returns the client itself."""
    return self

def __exit__(self, exc_type, exc_value, exc_tb):
    """Releases underlying transport's resources.

    The interpreter always invokes ``__exit__`` positionally, so renaming
    the parameters (the original shadowed the ``type`` builtin) is safe
    for callers.

    .. warning::
        ONLY use as a context manager if the transport is NOT shared
        with other clients! Exiting the with block will CLOSE the transport
        and may cause errors in other clients!
    """
    self.transport.close()


try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
Expand Down
Expand Up @@ -173,6 +173,15 @@ def _prep_wrapped_messages(self, client_info):
),
}

def close(self):
    """Closes resources associated with the transport.

    .. warning::
        Only call this method if the transport is NOT shared
        with other clients - this may cause errors in other clients!

    Raises:
        NotImplementedError: Always; concrete transport subclasses
            must override this method.
    """
    raise NotImplementedError()

@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Return the client designed to process long-running operations."""
Expand Down
Expand Up @@ -272,5 +272,8 @@ def annotate_video(
)
return self._stubs["annotate_video"]

def close(self):
    """Closes the transport's underlying gRPC channel.

    .. warning::
        Only call this if the transport is not shared with other
        clients; closing the channel affects every client using it.
    """
    channel = self.grpc_channel
    channel.close()


__all__ = ("VideoIntelligenceServiceGrpcTransport",)
Expand Up @@ -279,5 +279,8 @@ def annotate_video(
)
return self._stubs["annotate_video"]

def close(self):
    """Closes the transport's underlying gRPC channel.

    Returns whatever ``grpc_channel.close()`` returns, so async callers
    can await the result.
    """
    channel = self.grpc_channel
    return channel.close()


__all__ = ("VideoIntelligenceServiceGrpcAsyncIOTransport",)

0 comments on commit ac75850

Please sign in to comment.