
feat: add context manager support in client (#247)
- [ ] Regenerate this pull request now.

chore: fix docstring for first attribute of protos

committer: @busunkim96
PiperOrigin-RevId: 401271153

Source-Link: googleapis/googleapis@787f8c9

Source-Link: googleapis/googleapis-gen@81decff
Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiODFkZWNmZmU5ZmM3MjM5NmE4MTUzZTc1NmQxZDY3YTZlZWNmZDYyMCJ9
gcf-owl-bot[bot] committed Oct 7, 2021
1 parent ca56a71 commit 629365f
Showing 75 changed files with 937 additions and 32 deletions.
@@ -525,6 +525,12 @@ async def async_batch_annotate_files(
        # Done; return the response.
        return response

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc, tb):
        await self.transport.close()


try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
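The __aenter__/__aexit__ pair added above lets ImageAnnotatorAsyncClient be used as an async context manager, so the underlying transport is closed automatically when the block exits. A minimal usage sketch, assuming default application credentials; the bucket path and feature type are illustrative, not taken from this diff:

import asyncio

from google.cloud import vision_v1


async def main():
    # Leaving the async with block awaits transport.close().
    async with vision_v1.ImageAnnotatorAsyncClient() as client:
        response = await client.batch_annotate_images(
            requests=[
                {
                    "image": {"source": {"image_uri": "gs://my-bucket/photo.jpg"}},
                    "features": [{"type_": vision_v1.Feature.Type.LABEL_DETECTION}],
                }
            ]
        )
        print(response)


asyncio.run(main())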
18 changes: 14 additions & 4 deletions google/cloud/vision_v1/services/image_annotator/client.py
@@ -365,10 +365,7 @@ def __init__(
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=(
Transport == type(self).get_transport_class("grpc")
or Transport == type(self).get_transport_class("grpc_asyncio")
),
always_use_jwt_access=True,
)

def batch_annotate_images(
@@ -704,6 +701,19 @@ def async_batch_annotate_files(
        # Done; return the response.
        return response

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        """Releases underlying transport's resources.
        .. warning::
            ONLY use as a context manager if the transport is NOT shared
            with other clients! Exiting the with block will CLOSE the transport
            and may cause errors in other clients!
        """
        self.transport.close()


try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
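With __enter__/__exit__ in place, the synchronous client supports the same pattern; as the docstring warns, exiting the with block closes the transport, so this should only be used when the transport is not shared with other clients. A sketch under the same assumptions (illustrative image URI and feature type):

from google.cloud import vision_v1

with vision_v1.ImageAnnotatorClient() as client:
    response = client.batch_annotate_images(
        requests=[
            {
                "image": {"source": {"image_uri": "gs://my-bucket/photo.jpg"}},
                "features": [{"type_": vision_v1.Feature.Type.LABEL_DETECTION}],
            }
        ]
    )
    print(response)
# The transport is closed at this point; the client should not be reused afterwards.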
@@ -219,6 +219,15 @@ def _prep_wrapped_messages(self, client_info):
            ),
        }

    def close(self):
        """Closes resources associated with the transport.
        .. warning::
            Only call this method if the transport is NOT shared
            with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    @property
    def operations_client(self) -> operations_v1.OperationsClient:
        """Return the client designed to process long-running operations."""
@@ -384,5 +384,8 @@ def async_batch_annotate_files(
            )
        return self._stubs["async_batch_annotate_files"]

    def close(self):
        self.grpc_channel.close()


__all__ = ("ImageAnnotatorGrpcTransport",)
@@ -391,5 +391,8 @@ def async_batch_annotate_files(
            )
        return self._stubs["async_batch_annotate_files"]

    def close(self):
        return self.grpc_channel.close()


__all__ = ("ImageAnnotatorGrpcAsyncIOTransport",)
@@ -2080,6 +2080,12 @@ async def purge_products(
        # Done; return the response.
        return response

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc, tb):
        await self.transport.close()


try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
18 changes: 14 additions & 4 deletions google/cloud/vision_v1/services/product_search/client.py
@@ -404,10 +404,7 @@ def __init__(
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=(
Transport == type(self).get_transport_class("grpc")
or Transport == type(self).get_transport_class("grpc_asyncio")
),
always_use_jwt_access=True,
)

def create_product_set(
@@ -2141,6 +2138,19 @@ def purge_products(
        # Done; return the response.
        return response

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        """Releases underlying transport's resources.
        .. warning::
            ONLY use as a context manager if the transport is NOT shared
            with other clients! Exiting the with block will CLOSE the transport
            and may cause errors in other clients!
        """
        self.transport.close()


try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
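ProductSearchClient (and its async counterpart) gains the same context-manager behaviour: leaving the block calls transport.close(). A sketch assuming default credentials; the project and location are hypothetical:

from google.cloud import vision_v1

with vision_v1.ProductSearchClient() as client:
    parent = "projects/my-project/locations/us-west1"
    # Iterate the pager while the transport is still open.
    for product_set in client.list_product_sets(parent=parent):
        print(product_set.name)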
@@ -430,6 +430,15 @@ def _prep_wrapped_messages(self, client_info):
            ),
        }

    def close(self):
        """Closes resources associated with the transport.
        .. warning::
            Only call this method if the transport is NOT shared
            with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    @property
    def operations_client(self) -> operations_v1.OperationsClient:
        """Return the client designed to process long-running operations."""
@@ -951,5 +951,8 @@ def purge_products(
            )
        return self._stubs["purge_products"]

    def close(self):
        self.grpc_channel.close()


__all__ = ("ProductSearchGrpcTransport",)
@@ -968,5 +968,8 @@ def purge_products(
            )
        return self._stubs["purge_products"]

    def close(self):
        return self.grpc_channel.close()


__all__ = ("ProductSearchGrpcAsyncIOTransport",)
1 change: 1 addition & 0 deletions google/cloud/vision_v1/types/geometry.py
@@ -56,6 +56,7 @@ class NormalizedVertex(proto.Message):

class BoundingPoly(proto.Message):
r"""A bounding polygon for the detected image annotation.
Attributes:
vertices (Sequence[google.cloud.vision_v1.types.Vertex]):
The bounding polygon vertices.
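The docstring fix above only reflows the Attributes section of BoundingPoly. For context, the message can be constructed directly from its vertices; a small sketch with illustrative pixel coordinates:

from google.cloud import vision_v1

# A rectangle expressed as pixel-coordinate vertices (values are illustrative).
poly = vision_v1.BoundingPoly(
    vertices=[
        vision_v1.Vertex(x=0, y=0),
        vision_v1.Vertex(x=100, y=0),
        vision_v1.Vertex(x=100, y=50),
        vision_v1.Vertex(x=0, y=50),
    ]
)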
23 changes: 23 additions & 0 deletions google/cloud/vision_v1/types/image_annotator.py
@@ -164,6 +164,7 @@ class ImageSource(proto.Message):

class Image(proto.Message):
r"""Client image to perform Google Cloud Vision API tasks over.
Attributes:
content (bytes):
Image content, represented as a stream of bytes. Note: As
@@ -242,6 +243,7 @@ class FaceAnnotation(proto.Message):

class Landmark(proto.Message):
r"""A face-specific landmark (for example, a face feature).
Attributes:
type_ (google.cloud.vision_v1.types.FaceAnnotation.Landmark.Type):
Face landmark type.
@@ -317,6 +319,7 @@ class Type(proto.Enum):

class LocationInfo(proto.Message):
r"""Detected entity location information.
Attributes:
lat_lng (google.type.latlng_pb2.LatLng):
lat/long location coordinates.
@@ -327,6 +330,7 @@ class LocationInfo(proto.Message):

class Property(proto.Message):
r"""A ``Property`` consists of a user-supplied name/value pair.
Attributes:
name (str):
Name of the property.
@@ -343,6 +347,7 @@ class Property(proto.Message):

class EntityAnnotation(proto.Message):
r"""Set of detected entity features.
Attributes:
mid (str):
Opaque entity ID. Some IDs may be available in `Google
@@ -398,6 +403,7 @@ class EntityAnnotation(proto.Message):

class LocalizedObjectAnnotation(proto.Message):
r"""Set of detected objects with bounding boxes.
Attributes:
mid (str):
Object ID that should align with
@@ -484,6 +490,7 @@ class SafeSearchAnnotation(proto.Message):

class LatLongRect(proto.Message):
r"""Rectangle determined by min and max ``LatLng`` pairs.
Attributes:
min_lat_lng (google.type.latlng_pb2.LatLng):
Min lat/long pair.
@@ -516,6 +523,7 @@ class ColorInfo(proto.Message):

class DominantColorsAnnotation(proto.Message):
r"""Set of dominant colors and their corresponding scores.
Attributes:
colors (Sequence[google.cloud.vision_v1.types.ColorInfo]):
RGB color values with their score and pixel
@@ -527,6 +535,7 @@ class DominantColorsAnnotation(proto.Message):

class ImageProperties(proto.Message):
r"""Stores image properties, such as dominant colors.
Attributes:
dominant_colors (google.cloud.vision_v1.types.DominantColorsAnnotation):
If present, dominant colors completed
@@ -573,6 +582,7 @@ class CropHintsAnnotation(proto.Message):

class CropHintsParams(proto.Message):
r"""Parameters for crop hints annotation request.
Attributes:
aspect_ratios (Sequence[float]):
Aspect ratios in floats, representing the
@@ -590,6 +600,7 @@ class CropHintsParams(proto.Message):

class WebDetectionParams(proto.Message):
r"""Parameters for web detection request.
Attributes:
include_geo_results (bool):
Whether to include results derived from the
@@ -615,6 +626,7 @@ class TextDetectionParams(proto.Message):

class ImageContext(proto.Message):
r"""Image context and/or feature-specific parameters.
Attributes:
lat_long_rect (google.cloud.vision_v1.types.LatLongRect):
Not used.
@@ -694,6 +706,7 @@ class ImageAnnotationContext(proto.Message):

class AnnotateImageResponse(proto.Message):
r"""Response to an image annotation request.
Attributes:
face_annotations (Sequence[google.cloud.vision_v1.types.FaceAnnotation]):
If present, face detection has completed
@@ -814,6 +827,7 @@ class BatchAnnotateImagesRequest(proto.Message):

class BatchAnnotateImagesResponse(proto.Message):
r"""Response to a batch image annotation request.
Attributes:
responses (Sequence[google.cloud.vision_v1.types.AnnotateImageResponse]):
Individual responses to image annotation
@@ -922,6 +936,7 @@ class BatchAnnotateFilesRequest(proto.Message):

class BatchAnnotateFilesResponse(proto.Message):
r"""A list of file annotation responses.
Attributes:
responses (Sequence[google.cloud.vision_v1.types.AnnotateFileResponse]):
The list of file annotation responses, each
@@ -937,6 +952,7 @@ class BatchAnnotateFilesResponse(proto.Message):

class AsyncAnnotateFileRequest(proto.Message):
r"""An offline file annotation request.
Attributes:
input_config (google.cloud.vision_v1.types.InputConfig):
Required. Information about the input file.
@@ -958,6 +974,7 @@ class AsyncAnnotateFileRequest(proto.Message):

class AsyncAnnotateFileResponse(proto.Message):
r"""The response for a single offline file annotation request.
Attributes:
output_config (google.cloud.vision_v1.types.OutputConfig):
The output location and metadata from
@@ -969,6 +986,7 @@ class AsyncAnnotateFileResponse(proto.Message):

class AsyncBatchAnnotateImagesRequest(proto.Message):
r"""Request for async image annotation for a list of images.
Attributes:
requests (Sequence[google.cloud.vision_v1.types.AnnotateImageRequest]):
Required. Individual image annotation
@@ -1000,6 +1018,7 @@ class AsyncBatchAnnotateImagesRequest(proto.Message):

class AsyncBatchAnnotateImagesResponse(proto.Message):
r"""Response to an async batch image annotation request.
Attributes:
output_config (google.cloud.vision_v1.types.OutputConfig):
The output location and metadata from
@@ -1040,6 +1059,7 @@ class AsyncBatchAnnotateFilesRequest(proto.Message):

class AsyncBatchAnnotateFilesResponse(proto.Message):
r"""Response to an async batch file annotation request.
Attributes:
responses (Sequence[google.cloud.vision_v1.types.AsyncAnnotateFileResponse]):
The list of file annotation responses, one
@@ -1054,6 +1074,7 @@ class AsyncBatchAnnotateFilesResponse(proto.Message):

class InputConfig(proto.Message):
r"""The desired input location and metadata.
Attributes:
gcs_source (google.cloud.vision_v1.types.GcsSource):
The Google Cloud Storage location to read the
@@ -1079,6 +1100,7 @@ class InputConfig(proto.Message):

class OutputConfig(proto.Message):
r"""The desired output location and metadata.
Attributes:
gcs_destination (google.cloud.vision_v1.types.GcsDestination):
The Google Cloud Storage location to write
@@ -1154,6 +1176,7 @@ class GcsDestination(proto.Message):

class OperationMetadata(proto.Message):
r"""Contains metadata for the BatchAnnotateImages operation.
Attributes:
state (google.cloud.vision_v1.types.OperationMetadata.State):
Current state of the batch operation.
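The changes to image_annotator.py are likewise docstring-only, so the messages behave as before. As an illustration of how several of them fit together, a sketch of building an offline file annotation request; the bucket paths, batch size, and feature type are hypothetical:

from google.cloud import vision_v1

request = vision_v1.AsyncAnnotateFileRequest(
    input_config=vision_v1.InputConfig(
        gcs_source=vision_v1.GcsSource(uri="gs://my-bucket/report.pdf"),
        mime_type="application/pdf",
    ),
    features=[vision_v1.Feature(type_=vision_v1.Feature.Type.DOCUMENT_TEXT_DETECTION)],
    output_config=vision_v1.OutputConfig(
        gcs_destination=vision_v1.GcsDestination(uri="gs://my-bucket/vision-output/"),
        batch_size=20,
    ),
)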
4 changes: 4 additions & 0 deletions google/cloud/vision_v1/types/product_search.py
@@ -28,6 +28,7 @@

class ProductSearchParams(proto.Message):
r"""Parameters for a product search request.
Attributes:
bounding_poly (google.cloud.vision_v1.types.BoundingPoly):
The bounding polygon around the area of
@@ -74,6 +75,7 @@ class ProductSearchParams(proto.Message):

class ProductSearchResults(proto.Message):
r"""Results for a product search request.
Attributes:
index_time (google.protobuf.timestamp_pb2.Timestamp):
Timestamp of the index which provided these
@@ -93,6 +95,7 @@ class ProductSearchResults(proto.Message):

class Result(proto.Message):
r"""Information about a product.
Attributes:
product (google.cloud.vision_v1.types.Product):
The Product.
@@ -112,6 +115,7 @@ class Result(proto.Message):

class ObjectAnnotation(proto.Message):
r"""Prediction for what the object in the bounding box is.
Attributes:
mid (str):
Object ID that should align with
