From b31bd87c3fa8cad32768611a52d5effcc7d9b3e2 Mon Sep 17 00:00:00 2001
From: kolea2 <45548808+kolea2@users.noreply.github.com>
Date: Thu, 11 Feb 2021 10:12:46 -0500
Subject: [PATCH] feat!: microgenerator changes (#203)

Release-As: v2.0.0-dev1

* wip microgenerator changes
* fix unit tests
* fix system tests
* lint
* fixup after update
* fix test
* regen
* run fixup script
* run fixup script admin
* add scripts to build
* regenerate
* update synth
* fix tests
* more test cleanup
* fix mutate rows
* fix backups test
* fix tests
* fix docs and lint
* fix docs and lint
* temporarily put coverage at 99
* code feedback
* move coverage to 99
* pin beam bigtable deps to 1.0.0
* pin beam bigtable deps to 1.0.0
* fix imports
* fixup keepalive config
* lint
* cleanup
* cleanup
* cleanup
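The squashed commits above land a regenerated, microgenerator-based surface.
The sketches that follow summarize the main breaking patterns; they are
minimal illustrations (project, instance, and label values are invented for
the example), not excerpts from this patch.

Every GAPIC method now takes a single request dict (or request object) keyed
by proto field name instead of flattened positional arguments, which is why
the handwritten layer below rewrites calls such as get_instance and
get_app_profile:

    from google.cloud import bigtable

    client = bigtable.Client(project="my-project", admin=True)
    api = client.instance_admin_client

    # Before: api.get_instance(name)
    # After: one `request` argument keyed by proto field name.
    instance_pb = api.get_instance(
        request={"name": "projects/my-project/instances/my-instance"}
    )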
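The generated *_pb2 modules and the shared enums module are gone: messages
and their nested enums now live in proto-plus modules under types, and fields
that collide with Python keywords grow a trailing underscore, so
Instance.type becomes Instance.type_ (see the instance.py hunks). A sketch
with invented values:

    from google.cloud.bigtable_admin_v2.types import instance

    # Before: instance_pb2.Instance(type=..., labels=...)
    instance_pb = instance.Instance(
        display_name="my-instance",
        type_=instance.Instance.Type.PRODUCTION,  # note the keyword-safe name
        labels={"env": "dev"},
    )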
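Proto-plus messages wrap the raw protobuf message, so plain-protobuf APIs
such as HasField, WhichOneof, and ByteSize must be reached through the _pb
attribute, while simple field-presence checks can use the `in` operator
(compare the column_family.py, row.py, and row_data.py hunks). A sketch
using GcRule, chosen here only for illustration:

    from google.cloud.bigtable_admin_v2.types import table

    gc_rule = table.GcRule(max_num_versions=2)

    # Raw protobuf methods hang off the wrapped message.
    rule_name = gc_rule._pb.WhichOneof("rule")  # "max_num_versions"
    size_bytes = gc_rule._pb.ByteSize()

    # Proto-plus messages also support `in` for field-presence checks.
    assert "max_num_versions" in gc_rule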
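Transports are now constructed with keyword arguments and the service address
constant is renamed from SERVICE_ADDRESS to DEFAULT_ENDPOINT, mirroring the
client.py hunks. A sketch that assumes application-default credentials are
available:

    from google.cloud.bigtable_v2 import BigtableClient
    from google.cloud.bigtable_v2.services.bigtable.transports import (
        BigtableGrpcTransport,
    )

    endpoint = BigtableClient.DEFAULT_ENDPOINT
    channel = BigtableGrpcTransport.create_channel(
        host=endpoint,
        options=[("grpc.keepalive_time_ms", 30000)],
    )
    transport = BigtableGrpcTransport(channel=channel, host=endpoint)
    client = BigtableClient(transport=transport)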
---
 .coveragerc | 4 +-
 docs/conf.py | 5 +-
 docs/snippets.py | 8 +-
 google/cloud/bigtable/app_profile.py | 42 +-
 google/cloud/bigtable/backup.py | 76 +-
 google/cloud/bigtable/client.py | 57 +-
 google/cloud/bigtable/cluster.py | 28 +-
 google/cloud/bigtable/column_family.py | 17 +-
 google/cloud/bigtable/enums.py | 52 +-
 google/cloud/bigtable/instance.py | 56 +-
 google/cloud/bigtable/row.py | 4 +-
 google/cloud/bigtable/row_data.py | 18 +-
 google/cloud/bigtable/row_filters.py | 2 +-
 google/cloud/bigtable/row_set.py | 2 +-
 google/cloud/bigtable/table.py | 84 +-
 google/cloud/bigtable_admin_v2/__init__.py | 167 +-
 .../cloud/bigtable_admin_v2/gapic/__init__.py | 0
 .../gapic/bigtable_instance_admin_client.py | 1919 ------
 .../bigtable_instance_admin_client_config.py | 136 -
 .../gapic/bigtable_table_admin_client.py | 2336 -------
 .../bigtable_table_admin_client_config.py | 160 -
 .../gapic/transports/__init__.py | 0
 .../bigtable_instance_admin_grpc_transport.py | 380 --
 .../bigtable_table_admin_grpc_transport.py | 471 --
 .../cloud/bigtable_admin_v2/proto/__init__.py | 0
 .../proto/bigtable_cluster_data.proto | 94 -
 .../proto/bigtable_cluster_service.proto | 130 -
 .../bigtable_cluster_service_messages.proto | 141 -
 .../proto/bigtable_instance_admin.proto | 11 +-
 .../proto/bigtable_instance_admin_pb2.py | 2434 -------
 .../proto/bigtable_instance_admin_pb2_grpc.py | 880 ---
 .../proto/bigtable_table_admin.proto | 327 +-
 .../proto/bigtable_table_admin_pb2.py | 3578 ----------
 .../proto/bigtable_table_admin_pb2_grpc.py | 1083 ---
 .../proto/bigtable_table_data.proto | 126 -
 .../proto/bigtable_table_service.proto | 80 -
 .../bigtable_table_service_messages.proto | 116 -
 .../bigtable_admin_v2/proto/common_pb2.py | 190 -
 .../proto/common_pb2_grpc.py | 3 -
 .../bigtable_admin_v2/proto/instance_pb2.py | 893 ---
 .../proto/instance_pb2_grpc.py | 3 -
 .../bigtable_admin_v2/proto/table_pb2.py | 1694 -----
 .../bigtable_admin_v2/proto/table_pb2_grpc.py | 3 -
 google/cloud/bigtable_admin_v2/py.typed | 2 +
 .../bigtable_admin_v2/services/__init__.py | 16 +
 .../bigtable_instance_admin/__init__.py | 24 +
 .../bigtable_instance_admin/async_client.py | 1935 ++++++
 .../bigtable_instance_admin/client.py | 2069 ++++++
 .../bigtable_instance_admin/pagers.py | 153 +
 .../transports/__init__.py | 37 +
 .../transports/base.py | 491 ++
 .../transports/grpc.py | 794 +++
 .../transports/grpc_asyncio.py | 822 +++
 .../bigtable_table_admin}/__init__.py | 21 +-
 .../bigtable_table_admin/async_client.py | 2284 +++++++
 .../services/bigtable_table_admin/client.py | 2473 +++++++
 .../services/bigtable_table_admin/pagers.py | 405 ++
 .../transports/__init__.py | 37 +
 .../bigtable_table_admin/transports/base.py | 517 ++
 .../bigtable_table_admin/transports/grpc.py | 944 +++
 .../transports/grpc_asyncio.py | 962 +++
 google/cloud/bigtable_admin_v2/types.py | 76 -
 .../cloud/bigtable_admin_v2/types/__init__.py | 158 +
 .../types/bigtable_instance_admin.py | 530 ++
 .../types/bigtable_table_admin.py | 912 +++
 .../cloud/bigtable_admin_v2/types/common.py | 58 +
 .../cloud/bigtable_admin_v2/types/instance.py | 209 +
 google/cloud/bigtable_admin_v2/types/table.py | 376 +
 google/cloud/bigtable_v2/__init__.py | 75 +-
 google/cloud/bigtable_v2/gapic/__init__.py | 0
 .../bigtable_v2/gapic/bigtable_client.py | 779 ---
 .../gapic/bigtable_client_config.py | 80 -
 .../bigtable_v2/gapic/transports/__init__.py | 0
 .../transports/bigtable_grpc_transport.py | 207 -
 google/cloud/bigtable_v2/proto/__init__.py | 0
 .../proto/bigtable_cluster_data.proto | 94 -
 .../proto/bigtable_cluster_service.proto | 130 -
 .../bigtable_cluster_service_messages.proto | 141 -
 .../bigtable_v2/proto/bigtable_data.proto | 516 --
 .../proto/bigtable_instance_admin.proto | 456 --
 .../cloud/bigtable_v2/proto/bigtable_pb2.py | 1804 -----
 .../bigtable_v2/proto/bigtable_pb2_grpc.py | 313 -
 .../bigtable_v2/proto/bigtable_service.proto | 91 -
 .../proto/bigtable_service_messages.proto | 218 -
 .../proto/bigtable_table_admin.proto | 525 --
 .../proto/bigtable_table_data.proto | 126 -
 .../proto/bigtable_table_service.proto | 80 -
 .../bigtable_table_service_messages.proto | 116 -
 google/cloud/bigtable_v2/proto/common.proto | 41 -
 google/cloud/bigtable_v2/proto/data_pb2.py | 2672 --------
 .../cloud/bigtable_v2/proto/data_pb2_grpc.py | 3 -
 google/cloud/bigtable_v2/proto/instance.proto | 208 -
 google/cloud/bigtable_v2/proto/table.proto | 221 -
 google/cloud/bigtable_v2/py.typed | 2 +
 google/cloud/bigtable_v2/services/__init__.py | 16 +
 .../bigtable_v2/services/bigtable/__init__.py | 24 +
 .../services/bigtable/async_client.py | 865 +++
 .../bigtable_v2/services/bigtable/client.py | 1041 +++
 .../services/bigtable/transports/__init__.py | 35 +
 .../services/bigtable/transports/base.py | 254 +
 .../services/bigtable/transports/grpc.py | 432 ++
 .../bigtable/transports/grpc_asyncio.py | 440 ++
 google/cloud/bigtable_v2/types.py | 54 -
 google/cloud/bigtable_v2/types/__init__.py | 72 +
 google/cloud/bigtable_v2/types/bigtable.py | 463 ++
 google/cloud/bigtable_v2/types/data.py | 728 ++
 noxfile.py | 118 +-
 samples/beam/noxfile.py | 3 +-
 samples/beam/requirements.txt | 2 +-
 scripts/fixup_bigtable_admin_v2_keywords.py | 216 +
 scripts/fixup_bigtable_v2_keywords.py | 184 +
 setup.py | 63 +-
 synth.py | 55 +-
 tests/system.py | 29 +-
 .../unit/gapic/bigtable_admin_v2/__init__.py | 1 +
 .../test_bigtable_instance_admin.py | 5316 +++++++++++++++
 .../test_bigtable_table_admin.py | 6067 +++++++++++++++++
 tests/unit/gapic/bigtable_v2/__init__.py | 1 +
 tests/unit/gapic/bigtable_v2/test_bigtable.py | 2372 +++++++
 .../unit/gapic/v2/test_bigtable_client_v2.py | 316 -
 .../test_bigtable_instance_admin_client_v2.py | 924 ---
 .../v2/test_bigtable_table_admin_client_v2.py | 1039 ---
 tests/unit/test_app_profile.py | 210 +-
 tests/unit/test_backup.py | 246 +-
 tests/unit/test_client.py | 58 +-
 tests/unit/test_cluster.py | 143 +-
 tests/unit/test_column_family.py | 58 +-
 tests/unit/test_instance.py | 261 +-
 tests/unit/test_policy.py | 6 +-
 tests/unit/test_row.py | 50 +-
 tests/unit/test_row_data.py | 129 +-
 tests/unit/test_row_filters.py | 14 +-
 tests/unit/test_row_set.py | 4 +-
 tests/unit/test_table.py | 591 +-
 134 files changed, 36477 insertions(+), 29436 deletions(-)
 delete mode 100644 google/cloud/bigtable_admin_v2/gapic/__init__.py
 delete mode 100644 google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py
 delete mode 100644 google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py
 delete mode 100644 google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py
 delete mode 100644 google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py
 delete mode 100644 google/cloud/bigtable_admin_v2/gapic/transports/__init__.py
 delete mode 100644 google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py
 delete mode 100644 google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py
 delete mode 100644 google/cloud/bigtable_admin_v2/proto/__init__.py
 delete mode 100644 google/cloud/bigtable_admin_v2/proto/bigtable_cluster_data.proto
 delete mode 100644 google/cloud/bigtable_admin_v2/proto/bigtable_cluster_service.proto
 delete mode 100644 google/cloud/bigtable_admin_v2/proto/bigtable_cluster_service_messages.proto
 delete mode 100644 google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py
 delete mode 100644 google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py
 delete mode 100644 google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py
 delete mode 100644 google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py
 delete mode 100644 google/cloud/bigtable_admin_v2/proto/bigtable_table_data.proto
 delete mode 100644 google/cloud/bigtable_admin_v2/proto/bigtable_table_service.proto
 delete mode 100644 google/cloud/bigtable_admin_v2/proto/bigtable_table_service_messages.proto
 delete mode 100644 google/cloud/bigtable_admin_v2/proto/common_pb2.py
 delete mode 100644 google/cloud/bigtable_admin_v2/proto/common_pb2_grpc.py
 delete mode 100644 google/cloud/bigtable_admin_v2/proto/instance_pb2.py
 delete mode 100644 google/cloud/bigtable_admin_v2/proto/instance_pb2_grpc.py
 delete mode 100644 google/cloud/bigtable_admin_v2/proto/table_pb2.py
 delete mode 100644 google/cloud/bigtable_admin_v2/proto/table_pb2_grpc.py
 create mode 100644 google/cloud/bigtable_admin_v2/py.typed
 create mode 100644 google/cloud/bigtable_admin_v2/services/__init__.py
 create mode 100644 google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py
 create mode 100644 google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py
 create mode 100644 google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py
 create mode 100644 google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py
 create mode 100644 google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py
 create mode 100644 google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py
 create mode 100644 google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py
 create mode 100644 google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py
 rename google/{ => cloud/bigtable_admin_v2/services/bigtable_table_admin}/__init__.py (69%)
 create mode 100644 google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py
 create mode 100644 google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py
 create mode 100644 google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py
 create mode 100644 google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py
 create mode 100644 google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py
 create mode 100644 google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py
 create mode 100644 google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py
 delete mode 100644 google/cloud/bigtable_admin_v2/types.py
 create mode 100644 google/cloud/bigtable_admin_v2/types/__init__.py
 create mode 100644 google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py
 create mode 100644 google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py
 create mode 100644 google/cloud/bigtable_admin_v2/types/common.py
 create mode 100644 google/cloud/bigtable_admin_v2/types/instance.py
 create mode 100644 google/cloud/bigtable_admin_v2/types/table.py
 delete mode 100644 google/cloud/bigtable_v2/gapic/__init__.py
 delete mode 100644 google/cloud/bigtable_v2/gapic/bigtable_client.py
 delete mode 100644 google/cloud/bigtable_v2/gapic/bigtable_client_config.py
 delete mode 100644 google/cloud/bigtable_v2/gapic/transports/__init__.py
 delete mode 100644 google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py
 delete mode 100644 google/cloud/bigtable_v2/proto/__init__.py
 delete mode 100644 google/cloud/bigtable_v2/proto/bigtable_cluster_data.proto
 delete mode 100644 google/cloud/bigtable_v2/proto/bigtable_cluster_service.proto
 delete mode 100644 google/cloud/bigtable_v2/proto/bigtable_cluster_service_messages.proto
 delete mode 100644 google/cloud/bigtable_v2/proto/bigtable_data.proto
 delete mode 100644 google/cloud/bigtable_v2/proto/bigtable_instance_admin.proto
 delete mode 100644 google/cloud/bigtable_v2/proto/bigtable_pb2.py
 delete mode 100644 google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py
 delete mode 100644 google/cloud/bigtable_v2/proto/bigtable_service.proto
 delete mode 100644 google/cloud/bigtable_v2/proto/bigtable_service_messages.proto
 delete mode 100644 google/cloud/bigtable_v2/proto/bigtable_table_admin.proto
 delete mode 100644 google/cloud/bigtable_v2/proto/bigtable_table_data.proto
 delete mode 100644 google/cloud/bigtable_v2/proto/bigtable_table_service.proto
 delete mode 100644 google/cloud/bigtable_v2/proto/bigtable_table_service_messages.proto
 delete mode 100644 google/cloud/bigtable_v2/proto/common.proto
 delete mode 100644 google/cloud/bigtable_v2/proto/data_pb2.py
 delete mode 100644 google/cloud/bigtable_v2/proto/data_pb2_grpc.py
 delete mode 100644 google/cloud/bigtable_v2/proto/instance.proto
 delete mode 100644 google/cloud/bigtable_v2/proto/table.proto
 create mode 100644 google/cloud/bigtable_v2/py.typed
 create mode 100644 google/cloud/bigtable_v2/services/__init__.py
 create mode 100644 google/cloud/bigtable_v2/services/bigtable/__init__.py
 create mode 100644 google/cloud/bigtable_v2/services/bigtable/async_client.py
 create mode 100644 google/cloud/bigtable_v2/services/bigtable/client.py
 create mode 100644 google/cloud/bigtable_v2/services/bigtable/transports/__init__.py
 create mode 100644 google/cloud/bigtable_v2/services/bigtable/transports/base.py
 create mode 100644 google/cloud/bigtable_v2/services/bigtable/transports/grpc.py
 create mode 100644 google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py
 delete mode 100644 google/cloud/bigtable_v2/types.py
 create mode 100644 google/cloud/bigtable_v2/types/__init__.py
 create mode 100644 google/cloud/bigtable_v2/types/bigtable.py
 create mode 100644 google/cloud/bigtable_v2/types/data.py
 create mode 100644 scripts/fixup_bigtable_admin_v2_keywords.py
 create mode 100644 scripts/fixup_bigtable_v2_keywords.py
 create mode 100644 tests/unit/gapic/bigtable_admin_v2/__init__.py
 create mode 100644 tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py
 create mode 100644 tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py
 create mode 100644 tests/unit/gapic/bigtable_v2/__init__.py
 create mode 100644 tests/unit/gapic/bigtable_v2/test_bigtable.py
 delete mode 100644 tests/unit/gapic/v2/test_bigtable_client_v2.py
 delete mode 100644 tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py
 delete mode 100644 tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py

diff --git a/.coveragerc b/.coveragerc
index 0d8e6297d..b11c3eaa3 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -21,7 +21,7 @@ omit =
   google/cloud/__init__.py
 
 [report]
-fail_under = 100
+fail_under = 99
 show_missing = True
 exclude_lines =
     # Re-enable the standard pragma
@@ -30,6 +30,8 @@ exclude_lines =
     def __repr__
     # Ignore abstract methods
     raise NotImplementedError
+    # Ignore setuptools-less fallback
+    except pkg_resources.DistributionNotFound:
 omit =
   */gapic/*.py
   */proto/*.py
diff --git a/docs/conf.py b/docs/conf.py
index ef2392b38..dc4b4d822 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -347,10 +347,7 @@
 intersphinx_mapping = {
     "python": ("https://python.readthedocs.org/en/latest/", None),
     "google-auth": ("https://googleapis.dev/python/google-auth/latest/", None),
-    "google.api_core": (
-        "https://googleapis.dev/python/google-api-core/latest/",
-        None,
-    ),
+    "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None,),
     "grpc": ("https://grpc.github.io/grpc/python/", None),
     "proto-plus": ("https://proto-plus-python.readthedocs.io/en/latest/", None),
 }
diff --git a/docs/snippets.py b/docs/snippets.py
index dda59079d..eeb39c3bb 100644
--- a/docs/snippets.py
+++ b/docs/snippets.py
@@ -704,13 +704,13 @@ def test_bigtable_cluster_name():
 def test_bigtable_instance_from_pb():
     # [START bigtable_api_instance_from_pb]
     from google.cloud.bigtable import Client
-    from google.cloud.bigtable_admin_v2.types import instance_pb2
+    from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
 
     client = Client(admin=True)
     instance = client.instance(INSTANCE_ID)
 
     name = instance.name
-    instance_pb = instance_pb2.Instance(
+    instance_pb = data_v2_pb2.Instance(
         name=name, display_name=INSTANCE_ID, type=PRODUCTION, labels=LABELS
     )
 
@@ -723,7 +723,7 @@ def test_bigtable_instance_from_pb():
 def test_bigtable_cluster_from_pb():
     # [START bigtable_api_cluster_from_pb]
     from google.cloud.bigtable import Client
-    from google.cloud.bigtable_admin_v2.types import instance_pb2
+    from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
 
     client = Client(admin=True)
     instance = client.instance(INSTANCE_ID)
@@ -732,7 +732,7 @@ def test_bigtable_cluster_from_pb():
     name = cluster.name
     cluster_state = cluster.state
     serve_nodes = 1
-    cluster_pb = instance_pb2.Cluster(
+    cluster_pb = data_v2_pb2.Cluster(
         name=name,
         location=LOCATION_ID,
         state=cluster_state,
diff --git a/google/cloud/bigtable/app_profile.py b/google/cloud/bigtable/app_profile.py
index ebf817c4e..5d6dbdb81 100644
--- a/google/cloud/bigtable/app_profile.py
+++ b/google/cloud/bigtable/app_profile.py
@@ -18,7 +18,7 @@
 import re
 
 from google.cloud.bigtable.enums import RoutingPolicyType
-from google.cloud.bigtable_admin_v2.types import instance_pb2
+from google.cloud.bigtable_admin_v2.types import instance
 from google.protobuf import field_mask_pb2
 
 from google.api_core.exceptions import NotFound
@@ -138,7 +138,7 @@ def __ne__(self, other):
     def from_pb(cls, app_profile_pb, instance):
         """Creates an instance app_profile from a
protobuf. - :type app_profile_pb: :class:`instance_pb2.app_profile_pb` + :type app_profile_pb: :class:`instance.app_profile_pb` :param app_profile_pb: An instance protobuf object. :type instance: :class:`google.cloud.bigtable.instance.Instance` @@ -188,7 +188,7 @@ def _update_from_pb(self, app_profile_pb): self.description = app_profile_pb.description routing_policy_type = None - if app_profile_pb.HasField("multi_cluster_routing_use_any"): + if app_profile_pb._pb.HasField("multi_cluster_routing_use_any"): routing_policy_type = RoutingPolicyType.ANY self.allow_transactional_writes = False else: @@ -201,7 +201,7 @@ def _update_from_pb(self, app_profile_pb): def _to_pb(self): """Create an AppProfile proto buff message for API calls - :rtype: :class:`.instance_pb2.AppProfile` + :rtype: :class:`.instance.AppProfile` :returns: The converted current object. :raises: :class:`ValueError ` if the AppProfile @@ -215,15 +215,15 @@ def _to_pb(self): if self.routing_policy_type == RoutingPolicyType.ANY: multi_cluster_routing_use_any = ( - instance_pb2.AppProfile.MultiClusterRoutingUseAny() + instance.AppProfile.MultiClusterRoutingUseAny() ) else: - single_cluster_routing = instance_pb2.AppProfile.SingleClusterRouting( + single_cluster_routing = instance.AppProfile.SingleClusterRouting( cluster_id=self.cluster_id, allow_transactional_writes=self.allow_transactional_writes, ) - app_profile_pb = instance_pb2.AppProfile( + app_profile_pb = instance.AppProfile( name=self.name, description=self.description, multi_cluster_routing_use_any=multi_cluster_routing_use_any, @@ -242,7 +242,9 @@ def reload(self): :dedent: 4 """ - app_profile_pb = self.instance_admin_client.get_app_profile(self.name) + app_profile_pb = self.instance_admin_client.get_app_profile( + request={"name": self.name} + ) # NOTE: _update_from_pb does not check that the project and # app_profile ID on the response match the request. @@ -262,7 +264,7 @@ def exists(self): :returns: True if the AppProfile exists, else False. """ try: - self.instance_admin_client.get_app_profile(self.name) + self.instance_admin_client.get_app_profile(request={"name": self.name}) return True # NOTE: There could be other exceptions that are returned to the user. except NotFound: @@ -291,10 +293,12 @@ def create(self, ignore_warnings=None): """ return self.from_pb( self.instance_admin_client.create_app_profile( - parent=self._instance.name, - app_profile_id=self.app_profile_id, - app_profile=self._to_pb(), - ignore_warnings=ignore_warnings, + request={ + "parent": self._instance.name, + "app_profile_id": self.app_profile_id, + "app_profile": self._to_pb(), + "ignore_warnings": ignore_warnings, + } ), self._instance, ) @@ -328,9 +332,11 @@ def update(self, ignore_warnings=None): update_mask_pb.paths.append("single_cluster_routing") return self.instance_admin_client.update_app_profile( - app_profile=self._to_pb(), - update_mask=update_mask_pb, - ignore_warnings=ignore_warnings, + request={ + "app_profile": self._to_pb(), + "update_mask": update_mask_pb, + "ignore_warnings": ignore_warnings, + } ) def delete(self, ignore_warnings=None): @@ -352,4 +358,6 @@ def delete(self, ignore_warnings=None): If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. 
""" - self.instance_admin_client.delete_app_profile(self.name, ignore_warnings) + self.instance_admin_client.delete_app_profile( + request={"name": self.name, "ignore_warnings": ignore_warnings} + ) diff --git a/google/cloud/bigtable/backup.py b/google/cloud/bigtable/backup.py index 291ac783a..6dead1f74 100644 --- a/google/cloud/bigtable/backup.py +++ b/google/cloud/bigtable/backup.py @@ -17,10 +17,8 @@ import re from google.cloud._helpers import _datetime_to_pb_timestamp -from google.cloud.bigtable_admin_v2.gapic.bigtable_table_admin_client import ( - BigtableTableAdminClient, -) -from google.cloud.bigtable_admin_v2.types import table_pb2 +from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient +from google.cloud.bigtable_admin_v2.types import table from google.cloud.bigtable.policy import Policy from google.cloud.exceptions import NotFound from google.protobuf import field_mask_pb2 @@ -220,7 +218,7 @@ def state(self): def from_pb(cls, backup_pb, instance): """Creates a Backup instance from a protobuf message. - :type backup_pb: :class:`table_pb2.Backup` + :type backup_pb: :class:`table.Backup` :param backup_pb: A Backup protobuf object. :type instance: :class:`Instance ` @@ -256,7 +254,7 @@ def from_pb(cls, backup_pb, instance): match = _TABLE_NAME_RE.match(backup_pb.source_table) table_id = match.group("table_id") if match else None - expire_time = backup_pb.expire_time + expire_time = backup_pb._pb.expire_time backup = cls( backup_id, @@ -265,10 +263,10 @@ def from_pb(cls, backup_pb, instance): table_id=table_id, expire_time=expire_time, ) - backup._start_time = backup_pb.start_time - backup._end_time = backup_pb.end_time - backup._size_bytes = backup_pb.size_bytes - backup._state = backup_pb.state + backup._start_time = backup_pb._pb.start_time + backup._end_time = backup_pb._pb.end_time + backup._size_bytes = backup_pb._pb.size_bytes + backup._state = backup_pb._pb.state return backup @@ -308,13 +306,19 @@ def create(self, cluster_id=None): if not self._cluster: raise ValueError('"cluster" parameter must be set') - backup = table_pb2.Backup( + backup = table.Backup( source_table=self.source_table, expire_time=_datetime_to_pb_timestamp(self.expire_time), ) - api = self._instance._client.table_admin_client - return api.create_backup(self.parent, self.backup_id, backup) + api = self._instance._client._table_admin_client + return api.create_backup( + request={ + "parent": self.parent, + "backup_id": self.backup_id, + "backup": backup, + } + ) def get(self): """Retrieves metadata of a pending or completed Backup. @@ -328,9 +332,9 @@ def get(self): due to a retryable error and retry attempts failed. :raises ValueError: If the parameters are invalid. """ - api = self._instance._client.table_admin_client + api = self._instance._client._table_admin_client try: - return api.get_backup(self.name) + return api.get_backup(request={"name": self.name}) except NotFound: return None @@ -338,11 +342,11 @@ def reload(self): """Refreshes the stored backup properties.""" backup = self.get() self._source_table = backup.source_table - self._expire_time = backup.expire_time - self._start_time = backup.start_time - self._end_time = backup.end_time - self._size_bytes = backup.size_bytes - self._state = backup.state + self._expire_time = backup._pb.expire_time + self._start_time = backup._pb.start_time + self._end_time = backup._pb.end_time + self._size_bytes = backup._pb.size_bytes + self._state = backup._pb.state def exists(self): """Tests whether this Backup exists. 
@@ -358,18 +362,19 @@ def update_expire_time(self, new_expire_time): :type new_expire_time: :class:`datetime.datetime` :param new_expire_time: the new expiration time timestamp """ - backup_update = table_pb2.Backup( - name=self.name, - expire_time=_datetime_to_pb_timestamp(new_expire_time), + backup_update = table.Backup( + name=self.name, expire_time=_datetime_to_pb_timestamp(new_expire_time), ) update_mask = field_mask_pb2.FieldMask(paths=["expire_time"]) - api = self._instance._client.table_admin_client - api.update_backup(backup_update, update_mask) + api = self._instance._client._table_admin_client + api.update_backup(request={"backup": backup_update, "update_mask": update_mask}) self._expire_time = new_expire_time def delete(self): """Delete this Backup.""" - self._instance._client.table_admin_client.delete_backup(self.name) + self._instance._client._table_admin_client.delete_backup( + request={"name": self.name} + ) def restore(self, table_id): """Creates a new Table by restoring from this Backup. The new Table @@ -391,8 +396,14 @@ def restore(self, table_id): due to a retryable error and retry attempts failed. :raises: ValueError: If the parameters are invalid. """ - api = self._instance._client.table_admin_client - return api.restore_table(self._instance.name, table_id, self.name) + api = self._instance._client._table_admin_client + return api.restore_table( + request={ + "parent": self._instance.name, + "table_id": table_id, + "backup": self.name, + } + ) def get_iam_policy(self): """Gets the IAM access control policy for this backup. @@ -401,8 +412,7 @@ def get_iam_policy(self): :returns: The current IAM policy of this backup. """ table_api = self._instance._client.table_admin_client - args = {"resource": self.name} - response = table_api.get_iam_policy(**args) + response = table_api.get_iam_policy(request={"resource": self.name}) return Policy.from_pb(response) def set_iam_policy(self, policy): @@ -420,7 +430,9 @@ class `google.cloud.bigtable.policy.Policy` :returns: The current IAM policy of this backup. 
""" table_api = self._instance._client.table_admin_client - response = table_api.set_iam_policy(resource=self.name, policy=policy.to_pb()) + response = table_api.set_iam_policy( + request={"resource": self.name, "policy": policy.to_pb()} + ) return Policy.from_pb(response) def test_iam_permissions(self, permissions): @@ -441,6 +453,6 @@ def test_iam_permissions(self, permissions): """ table_api = self._instance._client.table_admin_client response = table_api.test_iam_permissions( - resource=self.name, permissions=permissions + request={"resource": self.name, "permissions": permissions} ) return list(response.permissions) diff --git a/google/cloud/bigtable/client.py b/google/cloud/bigtable/client.py index 703a1bd60..5e49934d0 100644 --- a/google/cloud/bigtable/client.py +++ b/google/cloud/bigtable/client.py @@ -35,10 +35,12 @@ from google.cloud import bigtable_v2 from google.cloud import bigtable_admin_v2 -from google.cloud.bigtable_v2.gapic.transports import bigtable_grpc_transport -from google.cloud.bigtable_admin_v2.gapic.transports import ( - bigtable_table_admin_grpc_transport, - bigtable_instance_admin_grpc_transport, +from google.cloud.bigtable_v2.services.bigtable.transports import BigtableGrpcTransport +from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports import ( + BigtableInstanceAdminGrpcTransport, +) +from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports import ( + BigtableTableAdminGrpcTransport, ) from google.cloud.bigtable import __version__ @@ -47,14 +49,14 @@ from google.cloud.client import ClientWithProject -from google.cloud.bigtable_admin_v2 import enums +from google.cloud.bigtable_admin_v2.types import instance from google.cloud.bigtable.cluster import _CLUSTER_NAME_RE from google.cloud.environment_vars import BIGTABLE_EMULATOR -INSTANCE_TYPE_PRODUCTION = enums.Instance.Type.PRODUCTION -INSTANCE_TYPE_DEVELOPMENT = enums.Instance.Type.DEVELOPMENT -INSTANCE_TYPE_UNSPECIFIED = enums.Instance.Type.TYPE_UNSPECIFIED +INSTANCE_TYPE_PRODUCTION = instance.Instance.Type.PRODUCTION +INSTANCE_TYPE_DEVELOPMENT = instance.Instance.Type.DEVELOPMENT +INSTANCE_TYPE_UNSPECIFIED = instance.Instance.Type.TYPE_UNSPECIFIED _CLIENT_INFO = client_info.ClientInfo(client_library_version=__version__) SPANNER_ADMIN_SCOPE = "https://www.googleapis.com/auth/spanner.admin" ADMIN_SCOPE = "https://www.googleapis.com/auth/bigtable.admin" @@ -187,9 +189,7 @@ def __init__( self._channel = channel self.SCOPE = self._get_scopes() super(Client, self).__init__( - project=project, - credentials=credentials, - client_options=client_options, + project=project, credentials=credentials, client_options=client_options, ) def _get_scopes(self): @@ -212,11 +212,11 @@ def _create_gapic_client_channel(self, client_class, grpc_transport): if self._client_options and self._client_options.api_endpoint: api_endpoint = self._client_options.api_endpoint else: - api_endpoint = client_class.SERVICE_ADDRESS + api_endpoint = client_class.DEFAULT_ENDPOINT channel = grpc_transport.create_channel( - api_endpoint, - self._credentials, + host=api_endpoint, + credentials=self._credentials, options={ "grpc.max_send_message_length": -1, "grpc.max_receive_message_length": -1, @@ -224,11 +224,7 @@ def _create_gapic_client_channel(self, client_class, grpc_transport): "grpc.keepalive_timeout_ms": 10000, }.items(), ) - transport = grpc_transport( - address=api_endpoint, - channel=channel, - credentials=None, - ) + transport = grpc_transport(channel=channel, host=api_endpoint) return transport 
@property @@ -254,7 +250,7 @@ def project_path(self): :rtype: str :returns: Return a fully-qualified project string. """ - return self.instance_admin_client.project_path(self.project) + return self.instance_admin_client.common_project_path(self.project) @property def table_data_client(self): @@ -272,8 +268,7 @@ def table_data_client(self): """ if self._table_data_client is None: transport = self._create_gapic_client_channel( - bigtable_v2.BigtableClient, - bigtable_grpc_transport.BigtableGrpcTransport, + bigtable_v2.BigtableClient, BigtableGrpcTransport, ) klass = _create_gapic_client( bigtable_v2.BigtableClient, @@ -306,7 +301,7 @@ def table_admin_client(self): transport = self._create_gapic_client_channel( bigtable_admin_v2.BigtableTableAdminClient, - bigtable_table_admin_grpc_transport.BigtableTableAdminGrpcTransport, + BigtableTableAdminGrpcTransport, ) klass = _create_gapic_client( bigtable_admin_v2.BigtableTableAdminClient, @@ -339,7 +334,7 @@ def instance_admin_client(self): transport = self._create_gapic_client_channel( bigtable_admin_v2.BigtableInstanceAdminClient, - bigtable_instance_admin_grpc_transport.BigtableInstanceAdminGrpcTransport, + BigtableInstanceAdminGrpcTransport, ) klass = _create_gapic_client( bigtable_admin_v2.BigtableInstanceAdminClient, @@ -372,10 +367,10 @@ def instance(self, instance_id, display_name=None, instance_type=None, labels=No :param instance_type: (Optional) The type of the instance. Possible values are represented by the following constants: - :data:`google.cloud.bigtable.enums.InstanceType.PRODUCTION`. - :data:`google.cloud.bigtable.enums.InstanceType.DEVELOPMENT`, + :data:`google.cloud.bigtable.instance.InstanceType.PRODUCTION`. + :data:`google.cloud.bigtable.instance.InstanceType.DEVELOPMENT`, Defaults to - :data:`google.cloud.bigtable.enums.InstanceType.UNSPECIFIED`. + :data:`google.cloud.bigtable.instance.InstanceType.UNSPECIFIED`. :type labels: dict :param labels: (Optional) Labels are a flexible and lightweight @@ -416,7 +411,9 @@ def list_instances(self): 'failed_locations' is a list of locations which could not be resolved. """ - resp = self.instance_admin_client.list_instances(self.project_path) + resp = self.instance_admin_client.list_instances( + request={"parent": self.project_path} + ) instances = [Instance.from_pb(instance, self) for instance in resp.instances] return instances, resp.failed_locations @@ -438,7 +435,9 @@ def list_clusters(self): locations which could not be resolved. """ resp = self.instance_admin_client.list_clusters( - self.instance_admin_client.instance_path(self.project, "-") + request={ + "parent": self.instance_admin_client.instance_path(self.project, "-") + } ) clusters = [] instances = {} diff --git a/google/cloud/bigtable/cluster.py b/google/cloud/bigtable/cluster.py index 1cf66f86b..5c4c355ff 100644 --- a/google/cloud/bigtable/cluster.py +++ b/google/cloud/bigtable/cluster.py @@ -16,7 +16,7 @@ import re -from google.cloud.bigtable_admin_v2.types import instance_pb2 +from google.cloud.bigtable_admin_v2.types import instance from google.api_core.exceptions import NotFound @@ -101,7 +101,7 @@ def from_pb(cls, cluster_pb, instance): :end-before: [END bigtable_api_cluster_from_pb] :dedent: 4 - :type cluster_pb: :class:`instance_pb2.Cluster` + :type cluster_pb: :class:`instance.Cluster` :param cluster_pb: An instance protobuf object. 
:type instance: :class:`google.cloud.bigtable.instance.Instance` @@ -211,7 +211,9 @@ def reload(self): :end-before: [END bigtable_api_reload_cluster] :dedent: 4 """ - cluster_pb = self._instance._client.instance_admin_client.get_cluster(self.name) + cluster_pb = self._instance._client.instance_admin_client.get_cluster( + request={"name": self.name} + ) # NOTE: _update_from_pb does not check that the project and # cluster ID on the response match the request. @@ -232,7 +234,7 @@ def exists(self): """ client = self._instance._client try: - client.instance_admin_client.get_cluster(name=self.name) + client.instance_admin_client.get_cluster(request={"name": self.name}) return True # NOTE: There could be other exceptions that are returned to the user. except NotFound: @@ -269,7 +271,11 @@ def create(self): cluster_pb = self._to_pb() return client.instance_admin_client.create_cluster( - self._instance.name, self.cluster_id, cluster_pb + request={ + "parent": self._instance.name, + "cluster_id": self.cluster_id, + "cluster": cluster_pb, + } ) def update(self): @@ -302,7 +308,11 @@ def update(self): # Location is set only at the time of creation of a cluster # and can not be changed after cluster has been created. return client.instance_admin_client.update_cluster( - name=self.name, serve_nodes=self.serve_nodes, location=None + request={ + "serve_nodes": self.serve_nodes, + "name": self.name, + "location": None, + } ) def delete(self): @@ -333,15 +343,15 @@ def delete(self): permanently deleted. """ client = self._instance._client - client.instance_admin_client.delete_cluster(self.name) + client.instance_admin_client.delete_cluster(request={"name": self.name}) def _to_pb(self): """ Create cluster proto buff message for API calls """ client = self._instance._client - location = client.instance_admin_client.location_path( + location = client.instance_admin_client.common_location_path( client.project, self.location_id ) - cluster_pb = instance_pb2.Cluster( + cluster_pb = instance.Cluster( location=location, serve_nodes=self.serve_nodes, default_storage_type=self.default_storage_type, diff --git a/google/cloud/bigtable/column_family.py b/google/cloud/bigtable/column_family.py index eb854cb8b..466011923 100644 --- a/google/cloud/bigtable/column_family.py +++ b/google/cloud/bigtable/column_family.py @@ -16,9 +16,9 @@ from google.cloud import _helpers -from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_v2_pb2 -from google.cloud.bigtable_admin_v2.proto import ( - bigtable_table_admin_pb2 as table_admin_v2_pb2, +from google.cloud.bigtable_admin_v2.types import table as table_v2_pb2 +from google.cloud.bigtable_admin_v2.types import ( + bigtable_table_admin as table_admin_v2_pb2, ) @@ -275,7 +275,7 @@ def create(self): # data it contains are the GC rule and the column family ID already # stored on this instance. client.table_admin_client.modify_column_families( - self._table.name, [modification] + request={"name": self._table.name, "modifications": [modification]} ) def update(self): @@ -302,7 +302,7 @@ def update(self): # data it contains are the GC rule and the column family ID already # stored on this instance. client.table_admin_client.modify_column_families( - self._table.name, [modification] + request={"name": self._table.name, "modifications": [modification]} ) def delete(self): @@ -324,7 +324,7 @@ def delete(self): # data it contains are the GC rule and the column family ID already # stored on this instance. 
client.table_admin_client.modify_column_families( - self._table.name, [modification] + request={"name": self._table.name, "modifications": [modification]} ) @@ -341,15 +341,14 @@ def _gc_rule_from_pb(gc_rule_pb): :raises: :class:`ValueError ` if the rule name is unexpected. """ - rule_name = gc_rule_pb.WhichOneof("rule") + rule_name = gc_rule_pb._pb.WhichOneof("rule") if rule_name is None: return None if rule_name == "max_num_versions": return MaxVersionsGCRule(gc_rule_pb.max_num_versions) elif rule_name == "max_age": - max_age = _helpers._duration_pb_to_timedelta(gc_rule_pb.max_age) - return MaxAgeGCRule(max_age) + return MaxAgeGCRule(gc_rule_pb.max_age) elif rule_name == "union": return GCRuleUnion([_gc_rule_from_pb(rule) for rule in gc_rule_pb.union.rules]) elif rule_name == "intersection": diff --git a/google/cloud/bigtable/enums.py b/google/cloud/bigtable/enums.py index f0965779f..50c7f2e60 100644 --- a/google/cloud/bigtable/enums.py +++ b/google/cloud/bigtable/enums.py @@ -13,7 +13,9 @@ # limitations under the License. """Wrappers for gapic enum types.""" -from google.cloud.bigtable_admin_v2 import enums +from google.cloud.bigtable_admin_v2.types import common +from google.cloud.bigtable_admin_v2.types import instance +from google.cloud.bigtable_admin_v2.types import table class StorageType(object): @@ -26,9 +28,9 @@ class StorageType(object): HDD (int): Magnetic drive (HDD) storage should be used. """ - UNSPECIFIED = enums.StorageType.STORAGE_TYPE_UNSPECIFIED - SSD = enums.StorageType.SSD - HDD = enums.StorageType.HDD + UNSPECIFIED = common.StorageType.STORAGE_TYPE_UNSPECIFIED + SSD = common.StorageType.SSD + HDD = common.StorageType.HDD class Instance(object): @@ -45,9 +47,9 @@ class State(object): destroyed if the creation process encounters an error. """ - NOT_KNOWN = enums.Instance.State.STATE_NOT_KNOWN - READY = enums.Instance.State.READY - CREATING = enums.Instance.State.CREATING + NOT_KNOWN = instance.Instance.State.STATE_NOT_KNOWN + READY = instance.Instance.State.READY + CREATING = instance.Instance.State.CREATING class Type(object): """ @@ -70,9 +72,9 @@ class Type(object): must not be set. """ - UNSPECIFIED = enums.Instance.Type.TYPE_UNSPECIFIED - PRODUCTION = enums.Instance.Type.PRODUCTION - DEVELOPMENT = enums.Instance.Type.DEVELOPMENT + UNSPECIFIED = instance.Instance.Type.TYPE_UNSPECIFIED + PRODUCTION = instance.Instance.Type.PRODUCTION + DEVELOPMENT = instance.Instance.Type.DEVELOPMENT class Cluster(object): @@ -96,11 +98,11 @@ class State(object): still exist, but no operations can be performed on the cluster. """ - NOT_KNOWN = enums.Cluster.State.STATE_NOT_KNOWN - READY = enums.Cluster.State.READY - CREATING = enums.Cluster.State.CREATING - RESIZING = enums.Cluster.State.RESIZING - DISABLED = enums.Cluster.State.DISABLED + NOT_KNOWN = instance.Cluster.State.STATE_NOT_KNOWN + READY = instance.Cluster.State.READY + CREATING = instance.Cluster.State.CREATING + RESIZING = instance.Cluster.State.RESIZING + DISABLED = instance.Cluster.State.DISABLED class RoutingPolicyType(object): @@ -150,11 +152,11 @@ class View(object): FULL (int): Populates all fields. 
""" - VIEW_UNSPECIFIED = enums.Table.View.VIEW_UNSPECIFIED - NAME_ONLY = enums.Table.View.NAME_ONLY - SCHEMA_VIEW = enums.Table.View.SCHEMA_VIEW - REPLICATION_VIEW = enums.Table.View.REPLICATION_VIEW - FULL = enums.Table.View.FULL + VIEW_UNSPECIFIED = table.Table.View.VIEW_UNSPECIFIED + NAME_ONLY = table.Table.View.NAME_ONLY + SCHEMA_VIEW = table.Table.View.SCHEMA_VIEW + REPLICATION_VIEW = table.Table.View.REPLICATION_VIEW + FULL = table.Table.View.FULL class ReplicationState(object): """ @@ -180,12 +182,12 @@ class ReplicationState(object): reflect the state of the table in other clusters. """ - STATE_NOT_KNOWN = enums.Table.ClusterState.ReplicationState.STATE_NOT_KNOWN - INITIALIZING = enums.Table.ClusterState.ReplicationState.INITIALIZING + STATE_NOT_KNOWN = table.Table.ClusterState.ReplicationState.STATE_NOT_KNOWN + INITIALIZING = table.Table.ClusterState.ReplicationState.INITIALIZING PLANNED_MAINTENANCE = ( - enums.Table.ClusterState.ReplicationState.PLANNED_MAINTENANCE + table.Table.ClusterState.ReplicationState.PLANNED_MAINTENANCE ) UNPLANNED_MAINTENANCE = ( - enums.Table.ClusterState.ReplicationState.UNPLANNED_MAINTENANCE + table.Table.ClusterState.ReplicationState.UNPLANNED_MAINTENANCE ) - READY = enums.Table.ClusterState.ReplicationState.READY + READY = table.Table.ClusterState.ReplicationState.READY diff --git a/google/cloud/bigtable/instance.py b/google/cloud/bigtable/instance.py index a126ee27a..d2fb5db07 100644 --- a/google/cloud/bigtable/instance.py +++ b/google/cloud/bigtable/instance.py @@ -22,7 +22,9 @@ from google.protobuf import field_mask_pb2 -from google.cloud.bigtable_admin_v2.types import instance_pb2, options_pb2 +from google.cloud.bigtable_admin_v2.types import instance + +from google.iam.v1 import options_pb2 from google.api_core.exceptions import NotFound @@ -121,7 +123,7 @@ def _update_from_pb(self, instance_pb): if not instance_pb.display_name: # Simple field (string) raise ValueError("Instance protobuf does not contain display_name") self.display_name = instance_pb.display_name - self.type_ = instance_pb.type + self.type_ = instance_pb.type_ self.labels = dict(instance_pb.labels) self._state = instance_pb.state @@ -136,7 +138,7 @@ def from_pb(cls, instance_pb, client): :end-before: [END bigtable_api_instance_from_pb] :dedent: 4 - :type instance_pb: :class:`instance_pb2.Instance` + :type instance_pb: :class:`instance.Instance` :param instance_pb: An instance protobuf object. :type client: :class:`Client ` @@ -314,17 +316,19 @@ def create( simultaneously." ) - instance_pb = instance_pb2.Instance( - display_name=self.display_name, type=self.type_, labels=self.labels + instance_pb = instance.Instance( + display_name=self.display_name, type_=self.type_, labels=self.labels ) parent = self._client.project_path return self._client.instance_admin_client.create_instance( - parent=parent, - instance_id=self.instance_id, - instance=instance_pb, - clusters={c.cluster_id: c._to_pb() for c in clusters}, + request={ + "parent": parent, + "instance_id": self.instance_id, + "instance": instance_pb, + "clusters": {c.cluster_id: c._to_pb() for c in clusters}, + } ) def exists(self): @@ -341,7 +345,7 @@ def exists(self): :returns: True if the table exists, else False. """ try: - self._client.instance_admin_client.get_instance(name=self.name) + self._client.instance_admin_client.get_instance(request={"name": self.name}) return True # NOTE: There could be other exceptions that are returned to the user. 
except NotFound: @@ -357,7 +361,9 @@ def reload(self): :end-before: [END bigtable_api_reload_instance] :dedent: 4 """ - instance_pb = self._client.instance_admin_client.get_instance(self.name) + instance_pb = self._client.instance_admin_client.get_instance( + request={"name": self.name} + ) # NOTE: _update_from_pb does not check that the project and # instance ID on the response match the request. @@ -399,15 +405,15 @@ def update(self): update_mask_pb.paths.append("type") if self.labels is not None: update_mask_pb.paths.append("labels") - instance_pb = instance_pb2.Instance( + instance_pb = instance.Instance( name=self.name, display_name=self.display_name, - type=self.type_, + type_=self.type_, labels=self.labels, ) return self._client.instance_admin_client.partial_update_instance( - instance=instance_pb, update_mask=update_mask_pb + request={"instance": instance_pb, "update_mask": update_mask_pb} ) def delete(self): @@ -439,7 +445,7 @@ def delete(self): irrevocably disappear from the API, and their data will be permanently deleted. """ - self._client.instance_admin_client.delete_instance(name=self.name) + self._client.instance_admin_client.delete_instance(request={"name": self.name}) def get_iam_policy(self, requested_policy_version=None): """Gets the access control policy for an instance resource. @@ -474,7 +480,7 @@ def get_iam_policy(self, requested_policy_version=None): instance_admin_client = self._client.instance_admin_client - resp = instance_admin_client.get_iam_policy(**args) + resp = instance_admin_client.get_iam_policy(request=args) return Policy.from_pb(resp) def set_iam_policy(self, policy): @@ -500,7 +506,7 @@ class `google.cloud.bigtable.policy.Policy` """ instance_admin_client = self._client.instance_admin_client resp = instance_admin_client.set_iam_policy( - resource=self.name, policy=policy.to_pb() + request={"resource": self.name, "policy": policy.to_pb()} ) return Policy.from_pb(resp) @@ -529,7 +535,7 @@ def test_iam_permissions(self, permissions): """ instance_admin_client = self._client.instance_admin_client resp = instance_admin_client.test_iam_permissions( - resource=self.name, permissions=permissions + request={"resource": self.name, "permissions": permissions} ) return list(resp.permissions) @@ -596,7 +602,9 @@ def list_clusters(self): 'failed_locations' is a list of locations which could not be resolved. """ - resp = self._client.instance_admin_client.list_clusters(self.name) + resp = self._client.instance_admin_client.list_clusters( + request={"parent": self.name} + ) clusters = [Cluster.from_pb(cluster, self) for cluster in resp.clusters] return clusters, resp.failed_locations @@ -641,10 +649,12 @@ def list_tables(self): :raises: :class:`ValueError ` if one of the returned tables has a name that is not of the expected format. """ - table_list_pb = self._client.table_admin_client.list_tables(self.name) + table_list_pb = self._client.table_admin_client.list_tables( + request={"parent": self.name} + ) result = [] - for table_pb in table_list_pb: + for table_pb in table_list_pb.tables: table_prefix = self.name + "/tables/" if not table_pb.name.startswith(table_prefix): raise ValueError( @@ -725,5 +735,7 @@ def list_app_profiles(self): :class:`~google.cloud.bigtable.app_profile.AppProfile` instances. 
""" - resp = self._client.instance_admin_client.list_app_profiles(self.name) + resp = self._client.instance_admin_client.list_app_profiles( + request={"parent": self.name} + ) return [AppProfile.from_pb(app_profile, self) for app_profile in resp] diff --git a/google/cloud/bigtable/row.py b/google/cloud/bigtable/row.py index f3e4231e1..1898ea772 100644 --- a/google/cloud/bigtable/row.py +++ b/google/cloud/bigtable/row.py @@ -22,7 +22,7 @@ from google.cloud._helpers import _datetime_from_microseconds from google.cloud._helpers import _microseconds_from_datetime from google.cloud._helpers import _to_bytes -from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 +from google.cloud.bigtable_v2.types import data as data_v2_pb2 _PACK_I64 = struct.Struct(">q").pack @@ -307,7 +307,7 @@ def get_mutations_size(self): mutation_size = 0 for mutation in self._get_mutations(): - mutation_size += mutation.ByteSize() + mutation_size += mutation._pb.ByteSize() return mutation_size diff --git a/google/cloud/bigtable/row_data.py b/google/cloud/bigtable/row_data.py index 1cc442f2c..0d22e2fc6 100644 --- a/google/cloud/bigtable/row_data.py +++ b/google/cloud/bigtable/row_data.py @@ -24,8 +24,8 @@ from google.api_core import retry from google.cloud._helpers import _datetime_from_microseconds from google.cloud._helpers import _to_bytes -from google.cloud.bigtable_v2.proto import bigtable_pb2 as data_messages_v2_pb2 -from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 +from google.cloud.bigtable_v2.types import bigtable as data_messages_v2_pb2 +from google.cloud.bigtable_v2.types import data as data_v2_pb2 _MISSING_COLUMN_FAMILY = "Column family {} is not among the cells stored in this row." _MISSING_COLUMN = ( @@ -537,11 +537,11 @@ def _process_chunk(self, chunk): def _update_cell(self, chunk): if self._cell is None: qualifier = None - if chunk.HasField("qualifier"): - qualifier = chunk.qualifier.value + if "qualifier" in chunk: + qualifier = chunk.qualifier family = None - if chunk.HasField("family_name"): - family = chunk.family_name.value + if "family_name" in chunk: + family = chunk.family_name self._cell = PartialCellData( chunk.row_key, @@ -571,8 +571,8 @@ def _validate_chunk_reset_row(self, chunk): # No reset with other keys _raise_if(chunk.row_key) - _raise_if(chunk.HasField("family_name")) - _raise_if(chunk.HasField("qualifier")) + _raise_if("family_name" in chunk) + _raise_if("qualifier" in chunk) _raise_if(chunk.timestamp_micros) _raise_if(chunk.labels) _raise_if(chunk.value_size) @@ -638,7 +638,7 @@ def build_updated_request(self): # if neither RowSet.row_keys nor RowSet.row_ranges currently exist, # add row_range that starts with last_scanned_key as start_key_open # to request only rows that have not been returned yet - if not self.message.HasField("rows"): + if "rows" not in self.message: row_range = data_v2_pb2.RowRange(start_key_open=self.last_scanned_key) r_kwargs["rows"] = data_v2_pb2.RowSet(row_ranges=[row_range]) else: diff --git a/google/cloud/bigtable/row_filters.py b/google/cloud/bigtable/row_filters.py index 973ba9565..b495fb646 100644 --- a/google/cloud/bigtable/row_filters.py +++ b/google/cloud/bigtable/row_filters.py @@ -19,7 +19,7 @@ from google.cloud._helpers import _microseconds_from_datetime from google.cloud._helpers import _to_bytes -from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 +from google.cloud.bigtable_v2.types import data as data_v2_pb2 _PACK_I64 = struct.Struct(">q").pack diff --git a/google/cloud/bigtable/row_set.py 
b/google/cloud/bigtable/row_set.py index 7697af4f7..0269d8761 100644 --- a/google/cloud/bigtable/row_set.py +++ b/google/cloud/bigtable/row_set.py @@ -141,7 +141,7 @@ def _update_message_request(self, message): for each in self.row_ranges: r_kwrags = each.get_range_kwargs() - message.rows.row_ranges.add(**r_kwrags) + message.rows.row_ranges.append(r_kwrags) class RowRange(object): diff --git a/google/cloud/bigtable/table.py b/google/cloud/bigtable/table.py index 887b74b02..740a65ae6 100644 --- a/google/cloud/bigtable/table.py +++ b/google/cloud/bigtable/table.py @@ -13,7 +13,6 @@ # limitations under the License. """User-friendly container for Google Cloud Bigtable Table.""" - from google.api_core import timeout from google.api_core.exceptions import Aborted from google.api_core.exceptions import DeadlineExceeded @@ -38,13 +37,11 @@ from google.cloud.bigtable.row_set import RowSet from google.cloud.bigtable.row_set import RowRange from google.cloud.bigtable import enums -from google.cloud.bigtable_v2.proto import bigtable_pb2 as data_messages_v2_pb2 -from google.cloud.bigtable_admin_v2.gapic.bigtable_table_admin_client import ( - BigtableTableAdminClient, -) -from google.cloud.bigtable_admin_v2.proto import table_pb2 as admin_messages_v2_pb2 -from google.cloud.bigtable_admin_v2.proto import ( - bigtable_table_admin_pb2 as table_admin_messages_v2_pb2, +from google.cloud.bigtable_v2.types import bigtable as data_messages_v2_pb2 +from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient +from google.cloud.bigtable_admin_v2.types import table as admin_messages_v2_pb2 +from google.cloud.bigtable_admin_v2.types import ( + bigtable_table_admin as table_admin_messages_v2_pb2, ) import warnings @@ -157,7 +154,7 @@ def get_iam_policy(self): :returns: The current IAM policy of this table. """ table_client = self._instance._client.table_admin_client - resp = table_client.get_iam_policy(resource=self.name) + resp = table_client.get_iam_policy(request={"resource": self.name}) return Policy.from_pb(resp) def set_iam_policy(self, policy): @@ -182,7 +179,9 @@ class `google.cloud.bigtable.policy.Policy` :returns: The current IAM policy of this table. """ table_client = self._instance._client.table_admin_client - resp = table_client.set_iam_policy(resource=self.name, policy=policy.to_pb()) + resp = table_client.set_iam_policy( + request={"resource": self.name, "policy": policy.to_pb()} + ) return Policy.from_pb(resp) def test_iam_permissions(self, permissions): @@ -210,7 +209,7 @@ def test_iam_permissions(self, permissions): """ table_client = self._instance._client.table_admin_client resp = table_client.test_iam_permissions( - resource=self.name, permissions=permissions + request={"resource": self.name, "permissions": permissions} ) return list(resp.permissions) @@ -363,7 +362,7 @@ def create(self, initial_split_keys=[], column_families={}): .. note:: A create request returns a - :class:`._generated.table_pb2.Table` but we don't use + :class:`._generated.table.Table` but we don't use this response. 
:type initial_split_keys: list @@ -389,10 +388,12 @@ def create(self, initial_split_keys=[], column_families={}): splits = [split(key=_to_bytes(key)) for key in initial_split_keys] table_client.create_table( - parent=instance_name, - table_id=self.table_id, - table=table, - initial_splits=splits, + request={ + "parent": instance_name, + "table_id": self.table_id, + "table": table, + "initial_splits": splits, + } ) def exists(self): @@ -410,7 +411,7 @@ def exists(self): """ table_client = self._instance._client.table_admin_client try: - table_client.get_table(name=self.name, view=VIEW_NAME_ONLY) + table_client.get_table(request={"name": self.name, "view": VIEW_NAME_ONLY}) return True except NotFound: return False @@ -426,7 +427,7 @@ def delete(self): :dedent: 4 """ table_client = self._instance._client.table_admin_client - table_client.delete_table(name=self.name) + table_client.delete_table(request={"name": self.name}) def list_column_families(self): """List the column families owned by this table. @@ -447,7 +448,7 @@ def list_column_families(self): name from the column family ID. """ table_client = self._instance._client.table_admin_client - table_pb = table_client.get_table(self.name) + table_pb = table_client.get_table(request={"name": self.name}) result = {} for column_family_id, value_pb in table_pb.column_families.items(): @@ -474,7 +475,9 @@ def get_cluster_states(self): REPLICATION_VIEW = enums.Table.View.REPLICATION_VIEW table_client = self._instance._client.table_admin_client - table_pb = table_client.get_table(self.name, view=REPLICATION_VIEW) + table_pb = table_client.get_table( + request={"name": self.name, "view": REPLICATION_VIEW} + ) return { cluster_id: ClusterState(value_pb.replication_state) @@ -582,7 +585,7 @@ def read_rows( row_set=row_set, ) data_client = self._instance._client.table_data_client - return PartialRowsData(data_client.transport.read_rows, request_pb, retry) + return PartialRowsData(data_client.read_rows, request_pb, retry) def yield_rows(self, **kwargs): """Read rows from this table. 
@@ -716,7 +719,7 @@ def sample_row_keys(self): """ data_client = self._instance._client.table_data_client response_iterator = data_client.sample_row_keys( - self.name, app_profile_id=self._app_profile_id + request={"table_name": self.name, "app_profile_id": self._app_profile_id} ) return response_iterator @@ -745,11 +748,12 @@ def truncate(self, timeout=None): table_admin_client = client.table_admin_client if timeout: table_admin_client.drop_row_range( - self.name, delete_all_data_from_table=True, timeout=timeout + request={"name": self.name, "delete_all_data_from_table": True}, + timeout=timeout, ) else: table_admin_client.drop_row_range( - self.name, delete_all_data_from_table=True + request={"name": self.name, "delete_all_data_from_table": True} ) def drop_by_prefix(self, row_key_prefix, timeout=None): @@ -780,11 +784,15 @@ def drop_by_prefix(self, row_key_prefix, timeout=None): table_admin_client = client.table_admin_client if timeout: table_admin_client.drop_row_range( - self.name, row_key_prefix=_to_bytes(row_key_prefix), timeout=timeout + request={ + "name": self.name, + "row_key_prefix": _to_bytes(row_key_prefix), + }, + timeout=timeout, ) else: table_admin_client.drop_row_range( - self.name, row_key_prefix=_to_bytes(row_key_prefix) + request={"name": self.name, "row_key_prefix": _to_bytes(row_key_prefix)} ) def mutations_batcher(self, flush_count=FLUSH_COUNT, max_row_bytes=MAX_ROW_BYTES): @@ -926,14 +934,16 @@ def list_backups(self, cluster_id=None, filter_=None, order_by=None, page_size=0 ) client = self._instance._client.table_admin_client backup_list_pb = client.list_backups( - parent=parent, - filter_=backups_filter, - order_by=order_by, - page_size=page_size, + request={ + "parent": parent, + "filter": backups_filter, + "order_by": order_by, + "page_size": page_size, + } ) result = [] - for backup_pb in backup_list_pb: + for backup_pb in backup_list_pb.backups: result.append(Backup.from_pb(backup_pb, self._instance)) return result @@ -982,7 +992,13 @@ def restore(self, new_table_id, cluster_id=None, backup_id=None, backup_name=Non cluster=cluster_id, backup=backup_id, ) - return api.restore_table(self._instance.name, new_table_id, backup_name) + return api.restore_table( + request={ + "parent": self._instance.name, + "table_id": new_table_id, + "backup": backup_name, + } + ) class _RetryableMutateRowsWorker(object): @@ -1073,8 +1089,8 @@ def _do_mutate_retryable_rows(self): try: responses = data_client.mutate_rows( - self.table_name, - entries, + table_name=self.table_name, + entries=entries, app_profile_id=self.app_profile_id, retry=None, **kwargs diff --git a/google/cloud/bigtable_admin_v2/__init__.py b/google/cloud/bigtable_admin_v2/__init__.py index 9f72d4f53..423742502 100644 --- a/google/cloud/bigtable_admin_v2/__init__.py +++ b/google/cloud/bigtable_admin_v2/__init__.py @@ -1,54 +1,153 @@ # -*- coding: utf-8 -*- -# + # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# https://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+# - -from __future__ import absolute_import -import sys -import warnings - -from google.cloud.bigtable_admin_v2 import types -from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client -from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client -from google.cloud.bigtable_admin_v2.gapic import enums - - -if sys.version_info[:2] == (2, 7): - message = ( - "A future version of this library will drop support for Python 2.7. " - "More details about Python 2 support for Google Cloud Client Libraries " - "can be found at https://cloud.google.com/python/docs/python2-sunset/" - ) - warnings.warn(message, DeprecationWarning) - - -class BigtableInstanceAdminClient( - bigtable_instance_admin_client.BigtableInstanceAdminClient -): - __doc__ = bigtable_instance_admin_client.BigtableInstanceAdminClient.__doc__ - enums = enums - - -class BigtableTableAdminClient(bigtable_table_admin_client.BigtableTableAdminClient): - __doc__ = bigtable_table_admin_client.BigtableTableAdminClient.__doc__ - enums = enums +from .services.bigtable_instance_admin import BigtableInstanceAdminClient +from .services.bigtable_table_admin import BigtableTableAdminClient +from .types.bigtable_instance_admin import CreateAppProfileRequest +from .types.bigtable_instance_admin import CreateClusterMetadata +from .types.bigtable_instance_admin import CreateClusterRequest +from .types.bigtable_instance_admin import CreateInstanceMetadata +from .types.bigtable_instance_admin import CreateInstanceRequest +from .types.bigtable_instance_admin import DeleteAppProfileRequest +from .types.bigtable_instance_admin import DeleteClusterRequest +from .types.bigtable_instance_admin import DeleteInstanceRequest +from .types.bigtable_instance_admin import GetAppProfileRequest +from .types.bigtable_instance_admin import GetClusterRequest +from .types.bigtable_instance_admin import GetInstanceRequest +from .types.bigtable_instance_admin import ListAppProfilesRequest +from .types.bigtable_instance_admin import ListAppProfilesResponse +from .types.bigtable_instance_admin import ListClustersRequest +from .types.bigtable_instance_admin import ListClustersResponse +from .types.bigtable_instance_admin import ListInstancesRequest +from .types.bigtable_instance_admin import ListInstancesResponse +from .types.bigtable_instance_admin import PartialUpdateInstanceRequest +from .types.bigtable_instance_admin import UpdateAppProfileMetadata +from .types.bigtable_instance_admin import UpdateAppProfileRequest +from .types.bigtable_instance_admin import UpdateClusterMetadata +from .types.bigtable_instance_admin import UpdateInstanceMetadata +from .types.bigtable_table_admin import CheckConsistencyRequest +from .types.bigtable_table_admin import CheckConsistencyResponse +from .types.bigtable_table_admin import CreateBackupMetadata +from .types.bigtable_table_admin import CreateBackupRequest +from .types.bigtable_table_admin import CreateTableFromSnapshotMetadata +from .types.bigtable_table_admin import CreateTableFromSnapshotRequest +from .types.bigtable_table_admin import CreateTableRequest +from .types.bigtable_table_admin import DeleteBackupRequest +from .types.bigtable_table_admin import DeleteSnapshotRequest +from .types.bigtable_table_admin import DeleteTableRequest +from .types.bigtable_table_admin import DropRowRangeRequest +from .types.bigtable_table_admin import GenerateConsistencyTokenRequest +from .types.bigtable_table_admin import GenerateConsistencyTokenResponse +from .types.bigtable_table_admin import 
GetBackupRequest +from .types.bigtable_table_admin import GetSnapshotRequest +from .types.bigtable_table_admin import GetTableRequest +from .types.bigtable_table_admin import ListBackupsRequest +from .types.bigtable_table_admin import ListBackupsResponse +from .types.bigtable_table_admin import ListSnapshotsRequest +from .types.bigtable_table_admin import ListSnapshotsResponse +from .types.bigtable_table_admin import ListTablesRequest +from .types.bigtable_table_admin import ListTablesResponse +from .types.bigtable_table_admin import ModifyColumnFamiliesRequest +from .types.bigtable_table_admin import OptimizeRestoredTableMetadata +from .types.bigtable_table_admin import RestoreTableMetadata +from .types.bigtable_table_admin import RestoreTableRequest +from .types.bigtable_table_admin import SnapshotTableMetadata +from .types.bigtable_table_admin import SnapshotTableRequest +from .types.bigtable_table_admin import UpdateBackupRequest +from .types.common import OperationProgress +from .types.common import StorageType +from .types.instance import AppProfile +from .types.instance import Cluster +from .types.instance import Instance +from .types.table import Backup +from .types.table import BackupInfo +from .types.table import ColumnFamily +from .types.table import GcRule +from .types.table import RestoreInfo +from .types.table import RestoreSourceType +from .types.table import Snapshot +from .types.table import Table __all__ = ( - "enums", - "types", + "AppProfile", + "Backup", + "BackupInfo", "BigtableInstanceAdminClient", + "CheckConsistencyRequest", + "CheckConsistencyResponse", + "Cluster", + "ColumnFamily", + "CreateAppProfileRequest", + "CreateBackupMetadata", + "CreateBackupRequest", + "CreateClusterMetadata", + "CreateClusterRequest", + "CreateInstanceMetadata", + "CreateInstanceRequest", + "CreateTableFromSnapshotMetadata", + "CreateTableFromSnapshotRequest", + "CreateTableRequest", + "DeleteAppProfileRequest", + "DeleteBackupRequest", + "DeleteClusterRequest", + "DeleteInstanceRequest", + "DeleteSnapshotRequest", + "DeleteTableRequest", + "DropRowRangeRequest", + "GcRule", + "GenerateConsistencyTokenRequest", + "GenerateConsistencyTokenResponse", + "GetAppProfileRequest", + "GetBackupRequest", + "GetClusterRequest", + "GetInstanceRequest", + "GetSnapshotRequest", + "GetTableRequest", + "Instance", + "ListAppProfilesRequest", + "ListAppProfilesResponse", + "ListBackupsRequest", + "ListBackupsResponse", + "ListClustersRequest", + "ListClustersResponse", + "ListInstancesRequest", + "ListInstancesResponse", + "ListSnapshotsRequest", + "ListSnapshotsResponse", + "ListTablesRequest", + "ListTablesResponse", + "ModifyColumnFamiliesRequest", + "OperationProgress", + "OptimizeRestoredTableMetadata", + "PartialUpdateInstanceRequest", + "RestoreInfo", + "RestoreSourceType", + "RestoreTableMetadata", + "RestoreTableRequest", + "Snapshot", + "SnapshotTableMetadata", + "SnapshotTableRequest", + "StorageType", + "Table", + "UpdateAppProfileMetadata", + "UpdateAppProfileRequest", + "UpdateBackupRequest", + "UpdateClusterMetadata", + "UpdateInstanceMetadata", "BigtableTableAdminClient", ) diff --git a/google/cloud/bigtable_admin_v2/gapic/__init__.py b/google/cloud/bigtable_admin_v2/gapic/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py b/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py deleted file mode 100644 index 4e8a0d0ba..000000000 --- 
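The rewritten package `__init__.py` flattens the public surface: the clients and the request/response types that previously lived under `.gapic` and `.proto` are re-exported at the package root, and the Python 2.7 deprecation shim is gone. A sketch of the resulting import style (resource names hypothetical):

    from google.cloud import bigtable_admin_v2

    request = bigtable_admin_v2.CreateTableRequest(
        parent="projects/my-project/instances/my-instance",  # hypothetical
        table_id="my-table",
    )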
a/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py +++ /dev/null @@ -1,1919 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Accesses the google.bigtable.admin.v2 BigtableInstanceAdmin API.""" - -import functools -import pkg_resources -import warnings - -from google.oauth2 import service_account -import google.api_core.client_options -import google.api_core.gapic_v1.client_info -import google.api_core.gapic_v1.config -import google.api_core.gapic_v1.method -import google.api_core.gapic_v1.routing_header -import google.api_core.grpc_helpers -import google.api_core.operation -import google.api_core.operations_v1 -import google.api_core.page_iterator -import google.api_core.path_template -import grpc - -from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client_config -from google.cloud.bigtable_admin_v2.gapic import enums -from google.cloud.bigtable_admin_v2.gapic.transports import ( - bigtable_instance_admin_grpc_transport, -) -from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2 -from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2_grpc -from google.cloud.bigtable_admin_v2.proto import instance_pb2 -from google.iam.v1 import iam_policy_pb2 -from google.iam.v1 import options_pb2 -from google.iam.v1 import policy_pb2 -from google.longrunning import operations_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 - - -try: - _GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( - "google-cloud-bigtable" - ).version -except pkg_resources.DistributionNotFound: # pragma: NO COVER - _GAPIC_LIBRARY_VERSION = None - - -class BigtableInstanceAdminClient(object): - """ - Service for creating, configuring, and deleting Cloud Bigtable Instances and - Clusters. Provides access to the Instance and Cluster schemas only, not the - tables' metadata or data stored in those tables. - """ - - SERVICE_ADDRESS = "bigtableadmin.googleapis.com:443" - """The default address of the service.""" - - # The name of the interface for this client. This is the key used to - # find the method configuration in the client_config dictionary. - _INTERFACE_NAME = "google.bigtable.admin.v2.BigtableInstanceAdmin" - - @classmethod - def from_service_account_file(cls, filename, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - BigtableInstanceAdminClient: The constructed client. 
- """ - credentials = service_account.Credentials.from_service_account_file(filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @classmethod - def app_profile_path(cls, project, instance, app_profile): - """Return a fully-qualified app_profile string.""" - return google.api_core.path_template.expand( - "projects/{project}/instances/{instance}/appProfiles/{app_profile}", - project=project, - instance=instance, - app_profile=app_profile, - ) - - @classmethod - def cluster_path(cls, project, instance, cluster): - """Return a fully-qualified cluster string.""" - return google.api_core.path_template.expand( - "projects/{project}/instances/{instance}/clusters/{cluster}", - project=project, - instance=instance, - cluster=cluster, - ) - - @classmethod - def instance_path(cls, project, instance): - """Return a fully-qualified instance string.""" - return google.api_core.path_template.expand( - "projects/{project}/instances/{instance}", - project=project, - instance=instance, - ) - - @classmethod - def location_path(cls, project, location): - """Return a fully-qualified location string.""" - return google.api_core.path_template.expand( - "projects/{project}/locations/{location}", - project=project, - location=location, - ) - - @classmethod - def project_path(cls, project): - """Return a fully-qualified project string.""" - return google.api_core.path_template.expand( - "projects/{project}", - project=project, - ) - - def __init__( - self, - transport=None, - channel=None, - credentials=None, - client_config=None, - client_info=None, - client_options=None, - ): - """Constructor. - - Args: - transport (Union[~.BigtableInstanceAdminGrpcTransport, - Callable[[~.Credentials, type], ~.BigtableInstanceAdminGrpcTransport]): A transport - instance, responsible for actually making the API calls. - The default transport uses the gRPC protocol. - This argument may also be a callable which returns a - transport instance. Callables will be sent the credentials - as the first argument and the default transport class as - the second argument. - channel (grpc.Channel): DEPRECATED. A ``Channel`` instance - through which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is mutually exclusive with providing a - transport instance to ``transport``; doing so will raise - an exception. - client_config (dict): DEPRECATED. A dictionary of call options for - each method. If not specified, the default configuration is used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - client_options (Union[dict, google.api_core.client_options.ClientOptions]): - Client options used to set user options on the client. API Endpoint - should be set through client_options. - """ - # Raise deprecation warnings for things we want to go away. 
- if client_config is not None: - warnings.warn( - "The `client_config` argument is deprecated.", - PendingDeprecationWarning, - stacklevel=2, - ) - else: - client_config = bigtable_instance_admin_client_config.config - - if channel: - warnings.warn( - "The `channel` argument is deprecated; use " "`transport` instead.", - PendingDeprecationWarning, - stacklevel=2, - ) - - api_endpoint = self.SERVICE_ADDRESS - if client_options: - if type(client_options) == dict: - client_options = google.api_core.client_options.from_dict( - client_options - ) - if client_options.api_endpoint: - api_endpoint = client_options.api_endpoint - - # Instantiate the transport. - # The transport is responsible for handling serialization and - # deserialization and actually sending data to the service. - if transport: - if callable(transport): - self.transport = transport( - credentials=credentials, - default_class=bigtable_instance_admin_grpc_transport.BigtableInstanceAdminGrpcTransport, - address=api_endpoint, - ) - else: - if credentials: - raise ValueError( - "Received both a transport instance and " - "credentials; these are mutually exclusive." - ) - self.transport = transport - else: - self.transport = bigtable_instance_admin_grpc_transport.BigtableInstanceAdminGrpcTransport( - address=api_endpoint, - channel=channel, - credentials=credentials, - ) - - if client_info is None: - client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION, - ) - else: - client_info.gapic_version = _GAPIC_LIBRARY_VERSION - self._client_info = client_info - - # Parse out the default settings for retry and timeout for each RPC - # from the client configuration. - # (Ordinarily, these are the defaults specified in the `*_config.py` - # file next to this one.) - self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME], - ) - - # Save a dictionary of cached API call functions. - # These are the actual callables which invoke the proper - # transport methods, wrapped with `wrap_method` to add retry, - # timeout, and the like. - self._inner_api_calls = {} - - # Service calls - def create_instance( - self, - parent, - instance_id, - instance, - clusters, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Create an instance within a project. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> parent = client.project_path('[PROJECT]') - >>> - >>> # TODO: Initialize `instance_id`: - >>> instance_id = '' - >>> - >>> # TODO: Initialize `instance`: - >>> instance = {} - >>> - >>> # TODO: Initialize `clusters`: - >>> clusters = {} - >>> - >>> response = client.create_instance(parent, instance_id, instance, clusters) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - parent (str): Required. The unique name of the project in which to create the new - instance. Values are of the form ``projects/{project}``. - instance_id (str): Required. The ID to be used when referring to the new instance - within its project, e.g., just ``myinstance`` rather than - ``projects/myproject/instances/myinstance``. - instance (Union[dict, ~google.cloud.bigtable_admin_v2.types.Instance]): Required. 
The instance to create. Fields marked ``OutputOnly`` must - be left blank. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Instance` - clusters (dict[str -> Union[dict, ~google.cloud.bigtable_admin_v2.types.Cluster]]): Required. The clusters to be created within the instance, mapped by - desired cluster ID, e.g., just ``mycluster`` rather than - ``projects/myproject/instances/myinstance/clusters/mycluster``. Fields - marked ``OutputOnly`` must be left blank. Currently, at most four - clusters can be specified. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Cluster` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "create_instance" not in self._inner_api_calls: - self._inner_api_calls[ - "create_instance" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_instance, - default_retry=self._method_configs["CreateInstance"].retry, - default_timeout=self._method_configs["CreateInstance"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.CreateInstanceRequest( - parent=parent, - instance_id=instance_id, - instance=instance, - clusters=clusters, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["create_instance"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - instance_pb2.Instance, - metadata_type=bigtable_instance_admin_pb2.CreateInstanceMetadata, - ) - - def get_instance( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets information about an instance. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> name = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> response = client.get_instance(name) - - Args: - name (str): Required. The unique name of the requested instance. Values are of - the form ``projects/{project}/instances/{instance}``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. 
- timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Instance` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "get_instance" not in self._inner_api_calls: - self._inner_api_calls[ - "get_instance" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_instance, - default_retry=self._method_configs["GetInstance"].retry, - default_timeout=self._method_configs["GetInstance"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.GetInstanceRequest( - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_instance"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_instances( - self, - parent, - page_token=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists information about instances in a project. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> parent = client.project_path('[PROJECT]') - >>> - >>> response = client.list_instances(parent) - - Args: - parent (str): Required. The unique name of the project for which a list of - instances is requested. Values are of the form ``projects/{project}``. - page_token (str): DEPRECATED: This field is unused and ignored. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.ListInstancesResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
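For reference while reading the deleted `list_instances` plumbing: the replacement client returns the `ListInstancesResponse` directly, and its `instances` field is iterated. A sketch with a hypothetical project, reusing the `client` constructed in the earlier sketch:

    response = client.list_instances(request={"parent": "projects/my-project"})
    for instance in response.instances:
        print(instance.display_name)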
- if "list_instances" not in self._inner_api_calls: - self._inner_api_calls[ - "list_instances" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_instances, - default_retry=self._method_configs["ListInstances"].retry, - default_timeout=self._method_configs["ListInstances"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.ListInstancesRequest( - parent=parent, - page_token=page_token, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["list_instances"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def update_instance( - self, - display_name, - name=None, - state=None, - type_=None, - labels=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates an instance within a project. This method updates only the display - name and type for an Instance. To update other Instance properties, such as - labels, use PartialUpdateInstance. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> # TODO: Initialize `display_name`: - >>> display_name = '' - >>> - >>> response = client.update_instance(display_name) - - Args: - display_name (str): Required. The descriptive name for this instance as it appears in UIs. - Can be changed at any time, but should be kept globally unique - to avoid confusion. - name (str): The unique name of the instance. Values are of the form - ``projects/{project}/instances/[a-z][a-z0-9\\-]+[a-z0-9]``. - state (~google.cloud.bigtable_admin_v2.types.State): (``OutputOnly``) The current state of the instance. - type_ (~google.cloud.bigtable_admin_v2.types.Type): The type of the instance. Defaults to ``PRODUCTION``. - labels (dict[str -> str]): Labels are a flexible and lightweight mechanism for organizing cloud - resources into groups that reflect a customer's organizational needs and - deployment strategies. They can be used to filter resources and - aggregate metrics. - - - Label keys must be between 1 and 63 characters long and must conform - to the regular expression: - ``[\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}``. - - Label values must be between 0 and 63 characters long and must - conform to the regular expression: ``[\p{Ll}\p{Lo}\p{N}_-]{0,63}``. - - No more than 64 labels can be associated with a given resource. - - Keys and values must both be under 128 bytes. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Instance` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. 
- ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "update_instance" not in self._inner_api_calls: - self._inner_api_calls[ - "update_instance" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_instance, - default_retry=self._method_configs["UpdateInstance"].retry, - default_timeout=self._method_configs["UpdateInstance"].timeout, - client_info=self._client_info, - ) - - request = instance_pb2.Instance( - display_name=display_name, - name=name, - state=state, - type=type_, - labels=labels, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["update_instance"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def partial_update_instance( - self, - instance, - update_mask, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Partially updates an instance within a project. This method can modify all - fields of an Instance and is the preferred way to update an Instance. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> # TODO: Initialize `instance`: - >>> instance = {} - >>> - >>> # TODO: Initialize `update_mask`: - >>> update_mask = {} - >>> - >>> response = client.partial_update_instance(instance, update_mask) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - instance (Union[dict, ~google.cloud.bigtable_admin_v2.types.Instance]): Required. The Instance which will (partially) replace the current value. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Instance` - update_mask (Union[dict, ~google.cloud.bigtable_admin_v2.types.FieldMask]): Required. The subset of Instance fields which should be replaced. - Must be explicitly set. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.FieldMask` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
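The deleted `partial_update_instance` wrapper returned a wrapped long-running operation; the generated client does the same, with the request expressed as one mapping. A sketch, assuming a hypothetical instance name:

    from google.protobuf import field_mask_pb2

    operation = client.partial_update_instance(
        request={
            "instance": {
                "name": "projects/my-project/instances/my-instance",  # hypothetical
                "display_name": "New display name",
            },
            "update_mask": field_mask_pb2.FieldMask(paths=["display_name"]),
        }
    )
    instance = operation.result()  # resolves to an Instance on success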
- if "partial_update_instance" not in self._inner_api_calls: - self._inner_api_calls[ - "partial_update_instance" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.partial_update_instance, - default_retry=self._method_configs["PartialUpdateInstance"].retry, - default_timeout=self._method_configs["PartialUpdateInstance"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.PartialUpdateInstanceRequest( - instance=instance, - update_mask=update_mask, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("instance.name", instance.name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["partial_update_instance"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - instance_pb2.Instance, - metadata_type=bigtable_instance_admin_pb2.UpdateInstanceMetadata, - ) - - def delete_instance( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Delete an instance from a project. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> name = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> client.delete_instance(name) - - Args: - name (str): Required. The unique name of the instance to be deleted. Values are - of the form ``projects/{project}/instances/{instance}``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "delete_instance" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_instance" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_instance, - default_retry=self._method_configs["DeleteInstance"].retry, - default_timeout=self._method_configs["DeleteInstance"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.DeleteInstanceRequest( - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["delete_instance"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def create_cluster( - self, - parent, - cluster_id, - cluster, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates a cluster within an instance. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> # TODO: Initialize `cluster_id`: - >>> cluster_id = '' - >>> - >>> # TODO: Initialize `cluster`: - >>> cluster = {} - >>> - >>> response = client.create_cluster(parent, cluster_id, cluster) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - parent (str): Required. The unique name of the instance in which to create the new - cluster. Values are of the form - ``projects/{project}/instances/{instance}``. - cluster_id (str): Required. The ID to be used when referring to the new cluster within - its instance, e.g., just ``mycluster`` rather than - ``projects/myproject/instances/myinstance/clusters/mycluster``. - cluster (Union[dict, ~google.cloud.bigtable_admin_v2.types.Cluster]): Required. The cluster to be created. Fields marked ``OutputOnly`` - must be left blank. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Cluster` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "create_cluster" not in self._inner_api_calls: - self._inner_api_calls[ - "create_cluster" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_cluster, - default_retry=self._method_configs["CreateCluster"].retry, - default_timeout=self._method_configs["CreateCluster"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.CreateClusterRequest( - parent=parent, - cluster_id=cluster_id, - cluster=cluster, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["create_cluster"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - instance_pb2.Cluster, - metadata_type=bigtable_instance_admin_pb2.CreateClusterMetadata, - ) - - def get_cluster( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets information about a cluster. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> name = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') - >>> - >>> response = client.get_cluster(name) - - Args: - name (str): Required. The unique name of the requested cluster. Values are of - the form ``projects/{project}/instances/{instance}/clusters/{cluster}``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Cluster` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "get_cluster" not in self._inner_api_calls: - self._inner_api_calls[ - "get_cluster" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_cluster, - default_retry=self._method_configs["GetCluster"].retry, - default_timeout=self._method_configs["GetCluster"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.GetClusterRequest( - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_cluster"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_clusters( - self, - parent, - page_token=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists information about clusters in an instance. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> response = client.list_clusters(parent) - - Args: - parent (str): Required. The unique name of the instance for which a list of - clusters is requested. Values are of the form - ``projects/{project}/instances/{instance}``. Use ``{instance} = '-'`` to - list Clusters for all Instances in a project, e.g., - ``projects/myproject/instances/-``. - page_token (str): DEPRECATED: This field is unused and ignored. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.ListClustersResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "list_clusters" not in self._inner_api_calls: - self._inner_api_calls[ - "list_clusters" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_clusters, - default_retry=self._method_configs["ListClusters"].retry, - default_timeout=self._method_configs["ListClusters"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.ListClustersRequest( - parent=parent, - page_token=page_token, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["list_clusters"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def update_cluster( - self, - serve_nodes, - name=None, - location=None, - state=None, - default_storage_type=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates a cluster within an instance. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> # TODO: Initialize `serve_nodes`: - >>> serve_nodes = 0 - >>> - >>> response = client.update_cluster(serve_nodes) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - serve_nodes (int): Required. The number of nodes allocated to this cluster. More nodes enable - higher throughput and more consistent performance. - name (str): The unique name of the cluster. Values are of the form - ``projects/{project}/instances/{instance}/clusters/[a-z][-a-z0-9]*``. - location (str): (``CreationOnly``) The location where this cluster's nodes and - storage reside. For best performance, clients should be located as close - as possible to this cluster. Currently only zones are supported, so - values should be of the form ``projects/{project}/locations/{zone}``. - state (~google.cloud.bigtable_admin_v2.types.State): The current state of the cluster. - default_storage_type (~google.cloud.bigtable_admin_v2.types.StorageType): (``CreationOnly``) The type of storage used by this cluster to serve - its parent instance's tables, unless explicitly overridden. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "update_cluster" not in self._inner_api_calls: - self._inner_api_calls[ - "update_cluster" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_cluster, - default_retry=self._method_configs["UpdateCluster"].retry, - default_timeout=self._method_configs["UpdateCluster"].timeout, - client_info=self._client_info, - ) - - request = instance_pb2.Cluster( - serve_nodes=serve_nodes, - name=name, - location=location, - state=state, - default_storage_type=default_storage_type, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["update_cluster"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - instance_pb2.Cluster, - metadata_type=bigtable_instance_admin_pb2.UpdateClusterMetadata, - ) - - def delete_cluster( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Deletes a cluster from an instance. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> name = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') - >>> - >>> client.delete_cluster(name) - - Args: - name (str): Required. The unique name of the cluster to be deleted. Values are - of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "delete_cluster" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_cluster" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_cluster, - default_retry=self._method_configs["DeleteCluster"].retry, - default_timeout=self._method_configs["DeleteCluster"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.DeleteClusterRequest( - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["delete_cluster"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def create_app_profile( - self, - parent, - app_profile_id, - app_profile, - ignore_warnings=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates an app profile within an instance. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> # TODO: Initialize `app_profile_id`: - >>> app_profile_id = '' - >>> - >>> # TODO: Initialize `app_profile`: - >>> app_profile = {} - >>> - >>> response = client.create_app_profile(parent, app_profile_id, app_profile) - - Args: - parent (str): Required. The unique name of the instance in which to create the new - app profile. Values are of the form - ``projects/{project}/instances/{instance}``. - app_profile_id (str): Required. The ID to be used when referring to the new app profile - within its instance, e.g., just ``myprofile`` rather than - ``projects/myproject/instances/myinstance/appProfiles/myprofile``. - app_profile (Union[dict, ~google.cloud.bigtable_admin_v2.types.AppProfile]): Required. The app profile to be created. Fields marked - ``OutputOnly`` will be ignored. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.AppProfile` - ignore_warnings (bool): If true, ignore safety checks when creating the app profile. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.AppProfile` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "create_app_profile" not in self._inner_api_calls: - self._inner_api_calls[ - "create_app_profile" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_app_profile, - default_retry=self._method_configs["CreateAppProfile"].retry, - default_timeout=self._method_configs["CreateAppProfile"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.CreateAppProfileRequest( - parent=parent, - app_profile_id=app_profile_id, - app_profile=app_profile, - ignore_warnings=ignore_warnings, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["create_app_profile"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def get_app_profile( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets information about an app profile. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> name = client.app_profile_path('[PROJECT]', '[INSTANCE]', '[APP_PROFILE]') - >>> - >>> response = client.get_app_profile(name) - - Args: - name (str): Required. The unique name of the requested app profile. Values are - of the form - ``projects/{project}/instances/{instance}/appProfiles/{app_profile}``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.AppProfile` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "get_app_profile" not in self._inner_api_calls: - self._inner_api_calls[ - "get_app_profile" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_app_profile, - default_retry=self._method_configs["GetAppProfile"].retry, - default_timeout=self._method_configs["GetAppProfile"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.GetAppProfileRequest( - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_app_profile"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_app_profiles( - self, - parent, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists information about app profiles in an instance. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> # Iterate over all results - >>> for element in client.list_app_profiles(parent): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_app_profiles(parent).pages: - ... for element in page: - ... # process element - ... pass - - Args: - parent (str): Required. The unique name of the instance for which a list of app - profiles is requested. Values are of the form - ``projects/{project}/instances/{instance}``. Use ``{instance} = '-'`` to - list AppProfiles for all Instances in a project, e.g., - ``projects/myproject/instances/-``. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.bigtable_admin_v2.types.AppProfile` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "list_app_profiles" not in self._inner_api_calls: - self._inner_api_calls[ - "list_app_profiles" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_app_profiles, - default_retry=self._method_configs["ListAppProfiles"].retry, - default_timeout=self._method_configs["ListAppProfiles"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.ListAppProfilesRequest( - parent=parent, - page_size=page_size, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_app_profiles"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="app_profiles", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def update_app_profile( - self, - app_profile, - update_mask, - ignore_warnings=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates an app profile within an instance. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> # TODO: Initialize `app_profile`: - >>> app_profile = {} - >>> - >>> # TODO: Initialize `update_mask`: - >>> update_mask = {} - >>> - >>> response = client.update_app_profile(app_profile, update_mask) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - app_profile (Union[dict, ~google.cloud.bigtable_admin_v2.types.AppProfile]): Required. The app profile which will (partially) replace the current value. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.AppProfile` - update_mask (Union[dict, ~google.cloud.bigtable_admin_v2.types.FieldMask]): Required. The subset of app profile fields which should be replaced. - If unset, all fields will be replaced. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.FieldMask` - ignore_warnings (bool): If true, ignore safety checks when updating the app profile. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "update_app_profile" not in self._inner_api_calls: - self._inner_api_calls[ - "update_app_profile" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_app_profile, - default_retry=self._method_configs["UpdateAppProfile"].retry, - default_timeout=self._method_configs["UpdateAppProfile"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.UpdateAppProfileRequest( - app_profile=app_profile, - update_mask=update_mask, - ignore_warnings=ignore_warnings, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("app_profile.name", app_profile.name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["update_app_profile"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - instance_pb2.AppProfile, - metadata_type=bigtable_instance_admin_pb2.UpdateAppProfileMetadata, - ) - - def delete_app_profile( - self, - name, - ignore_warnings=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Deletes an app profile from an instance. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> name = client.app_profile_path('[PROJECT]', '[INSTANCE]', '[APP_PROFILE]') - >>> - >>> client.delete_app_profile(name) - - Args: - name (str): Required. The unique name of the app profile to be deleted. Values - are of the form - ``projects/{project}/instances/{instance}/appProfiles/{app_profile}``. - ignore_warnings (bool): If true, ignore safety checks when deleting the app profile. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "delete_app_profile" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_app_profile" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_app_profile, - default_retry=self._method_configs["DeleteAppProfile"].retry, - default_timeout=self._method_configs["DeleteAppProfile"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.DeleteAppProfileRequest( - name=name, - ignore_warnings=ignore_warnings, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["delete_app_profile"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def get_iam_policy( - self, - resource, - options_=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets the access control policy for an instance resource. Returns an empty - policy if an instance exists but does not have a policy set. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> # TODO: Initialize `resource`: - >>> resource = '' - >>> - >>> response = client.get_iam_policy(resource) - - Args: - resource (str): REQUIRED: The resource for which the policy is being requested. - See the operation documentation for the appropriate value for this field. - options_ (Union[dict, ~google.cloud.bigtable_admin_v2.types.GetPolicyOptions]): OPTIONAL: A ``GetPolicyOptions`` object for specifying options to - ``GetIamPolicy``. This field is only used by Cloud IAM. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.GetPolicyOptions` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Policy` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "get_iam_policy" not in self._inner_api_calls: - self._inner_api_calls[ - "get_iam_policy" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_iam_policy, - default_retry=self._method_configs["GetIamPolicy"].retry, - default_timeout=self._method_configs["GetIamPolicy"].timeout, - client_info=self._client_info, - ) - - request = iam_policy_pb2.GetIamPolicyRequest( - resource=resource, - options=options_, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("resource", resource)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_iam_policy"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def set_iam_policy( - self, - resource, - policy, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Sets the access control policy on an instance resource. Replaces any - existing policy. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> # TODO: Initialize `resource`: - >>> resource = '' - >>> - >>> # TODO: Initialize `policy`: - >>> policy = {} - >>> - >>> response = client.set_iam_policy(resource, policy) - - Args: - resource (str): REQUIRED: The resource for which the policy is being specified. - See the operation documentation for the appropriate value for this field. - policy (Union[dict, ~google.cloud.bigtable_admin_v2.types.Policy]): REQUIRED: The complete policy to be applied to the ``resource``. The - size of the policy is limited to a few 10s of KB. An empty policy is a - valid policy but certain Cloud Platform services (such as Projects) - might reject them. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Policy` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Policy` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "set_iam_policy" not in self._inner_api_calls: - self._inner_api_calls[ - "set_iam_policy" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.set_iam_policy, - default_retry=self._method_configs["SetIamPolicy"].retry, - default_timeout=self._method_configs["SetIamPolicy"].timeout, - client_info=self._client_info, - ) - - request = iam_policy_pb2.SetIamPolicyRequest( - resource=resource, - policy=policy, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("resource", resource)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["set_iam_policy"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def test_iam_permissions( - self, - resource, - permissions, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Returns permissions that the caller has on the specified instance resource. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> # TODO: Initialize `resource`: - >>> resource = '' - >>> - >>> # TODO: Initialize `permissions`: - >>> permissions = [] - >>> - >>> response = client.test_iam_permissions(resource, permissions) - - Args: - resource (str): REQUIRED: The resource for which the policy detail is being requested. - See the operation documentation for the appropriate value for this field. - permissions (list[str]): The set of permissions to check for the ``resource``. Permissions - with wildcards (such as '*' or 'storage.*') are not allowed. For more - information see `IAM - Overview `__. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.TestIamPermissionsResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "test_iam_permissions" not in self._inner_api_calls: - self._inner_api_calls[ - "test_iam_permissions" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.test_iam_permissions, - default_retry=self._method_configs["TestIamPermissions"].retry, - default_timeout=self._method_configs["TestIamPermissions"].timeout, - client_info=self._client_info, - ) - - request = iam_policy_pb2.TestIamPermissionsRequest( - resource=resource, - permissions=permissions, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("resource", resource)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["test_iam_permissions"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) diff --git a/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py b/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py deleted file mode 100644 index b2ec35e01..000000000 --- a/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py +++ /dev/null @@ -1,136 +0,0 @@ -config = { - "interfaces": { - "google.bigtable.admin.v2.BigtableInstanceAdmin": { - "retry_codes": { - "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], - "non_idempotent": [], - }, - "retry_params": { - "idempotent_params": { - "initial_retry_delay_millis": 1000, - "retry_delay_multiplier": 2.0, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 60000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 60000, - "total_timeout_millis": 600000, - }, - "non_idempotent_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 1.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 60000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 60000, - "total_timeout_millis": 60000, - }, - "non_idempotent_heavy_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 1.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 300000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 300000, - "total_timeout_millis": 300000, - }, - }, - "methods": { - "CreateInstance": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_heavy_params", - }, - "GetInstance": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "ListInstances": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "UpdateInstance": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "PartialUpdateInstance": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "DeleteInstance": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - "CreateCluster": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - "GetCluster": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "ListClusters": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "UpdateCluster": { - 
"timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "DeleteCluster": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - "CreateAppProfile": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - "GetAppProfile": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "ListAppProfiles": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "UpdateAppProfile": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "DeleteAppProfile": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - "GetIamPolicy": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "SetIamPolicy": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - "TestIamPermissions": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - }, - } - } -} diff --git a/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py b/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py deleted file mode 100644 index d507a3c0b..000000000 --- a/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py +++ /dev/null @@ -1,2336 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Accesses the google.bigtable.admin.v2 BigtableTableAdmin API.""" - -import functools -import pkg_resources -import warnings - -from google.oauth2 import service_account -import google.api_core.client_options -import google.api_core.gapic_v1.client_info -import google.api_core.gapic_v1.config -import google.api_core.gapic_v1.method -import google.api_core.gapic_v1.routing_header -import google.api_core.grpc_helpers -import google.api_core.operation -import google.api_core.operations_v1 -import google.api_core.page_iterator -import google.api_core.path_template -import google.api_core.protobuf_helpers -import grpc - -from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client_config -from google.cloud.bigtable_admin_v2.gapic import enums -from google.cloud.bigtable_admin_v2.gapic.transports import ( - bigtable_table_admin_grpc_transport, -) -from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2 -from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2_grpc -from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2 -from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2_grpc -from google.cloud.bigtable_admin_v2.proto import instance_pb2 -from google.cloud.bigtable_admin_v2.proto import table_pb2 -from google.iam.v1 import iam_policy_pb2 -from google.iam.v1 import options_pb2 -from google.iam.v1 import policy_pb2 -from google.longrunning import operations_pb2 -from google.protobuf import duration_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 - - -try: - _GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( - "google-cloud-bigtable" - ).version -except pkg_resources.DistributionNotFound: # pragma: NO COVER - _GAPIC_LIBRARY_VERSION = None - - -class BigtableTableAdminClient(object): - """ - Service for creating, configuring, and deleting Cloud Bigtable tables. - - - Provides access to the table schemas only, not the data stored within - the tables. - """ - - SERVICE_ADDRESS = "bigtableadmin.googleapis.com:443" - """The default address of the service.""" - - # The name of the interface for this client. This is the key used to - # find the method configuration in the client_config dictionary. - _INTERFACE_NAME = "google.bigtable.admin.v2.BigtableTableAdmin" - - @classmethod - def from_service_account_file(cls, filename, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - BigtableTableAdminClient: The constructed client. 
- """ - credentials = service_account.Credentials.from_service_account_file(filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @classmethod - def backup_path(cls, project, instance, cluster, backup): - """Return a fully-qualified backup string.""" - return google.api_core.path_template.expand( - "projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}", - project=project, - instance=instance, - cluster=cluster, - backup=backup, - ) - - @classmethod - def cluster_path(cls, project, instance, cluster): - """Return a fully-qualified cluster string.""" - return google.api_core.path_template.expand( - "projects/{project}/instances/{instance}/clusters/{cluster}", - project=project, - instance=instance, - cluster=cluster, - ) - - @classmethod - def instance_path(cls, project, instance): - """Return a fully-qualified instance string.""" - return google.api_core.path_template.expand( - "projects/{project}/instances/{instance}", - project=project, - instance=instance, - ) - - @classmethod - def snapshot_path(cls, project, instance, cluster, snapshot): - """Return a fully-qualified snapshot string.""" - return google.api_core.path_template.expand( - "projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}", - project=project, - instance=instance, - cluster=cluster, - snapshot=snapshot, - ) - - @classmethod - def table_path(cls, project, instance, table): - """Return a fully-qualified table string.""" - return google.api_core.path_template.expand( - "projects/{project}/instances/{instance}/tables/{table}", - project=project, - instance=instance, - table=table, - ) - - def __init__( - self, - transport=None, - channel=None, - credentials=None, - client_config=None, - client_info=None, - client_options=None, - ): - """Constructor. - - Args: - transport (Union[~.BigtableTableAdminGrpcTransport, - Callable[[~.Credentials, type], ~.BigtableTableAdminGrpcTransport]): A transport - instance, responsible for actually making the API calls. - The default transport uses the gRPC protocol. - This argument may also be a callable which returns a - transport instance. Callables will be sent the credentials - as the first argument and the default transport class as - the second argument. - channel (grpc.Channel): DEPRECATED. A ``Channel`` instance - through which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is mutually exclusive with providing a - transport instance to ``transport``; doing so will raise - an exception. - client_config (dict): DEPRECATED. A dictionary of call options for - each method. If not specified, the default configuration is used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - client_options (Union[dict, google.api_core.client_options.ClientOptions]): - Client options used to set user options on the client. API Endpoint - should be set through client_options. 
- """ - # Raise deprecation warnings for things we want to go away. - if client_config is not None: - warnings.warn( - "The `client_config` argument is deprecated.", - PendingDeprecationWarning, - stacklevel=2, - ) - else: - client_config = bigtable_table_admin_client_config.config - - if channel: - warnings.warn( - "The `channel` argument is deprecated; use " "`transport` instead.", - PendingDeprecationWarning, - stacklevel=2, - ) - - api_endpoint = self.SERVICE_ADDRESS - if client_options: - if type(client_options) == dict: - client_options = google.api_core.client_options.from_dict( - client_options - ) - if client_options.api_endpoint: - api_endpoint = client_options.api_endpoint - - # Instantiate the transport. - # The transport is responsible for handling serialization and - # deserialization and actually sending data to the service. - if transport: - if callable(transport): - self.transport = transport( - credentials=credentials, - default_class=bigtable_table_admin_grpc_transport.BigtableTableAdminGrpcTransport, - address=api_endpoint, - ) - else: - if credentials: - raise ValueError( - "Received both a transport instance and " - "credentials; these are mutually exclusive." - ) - self.transport = transport - else: - self.transport = ( - bigtable_table_admin_grpc_transport.BigtableTableAdminGrpcTransport( - address=api_endpoint, - channel=channel, - credentials=credentials, - ) - ) - - if client_info is None: - client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION, - ) - else: - client_info.gapic_version = _GAPIC_LIBRARY_VERSION - self._client_info = client_info - - # Parse out the default settings for retry and timeout for each RPC - # from the client configuration. - # (Ordinarily, these are the defaults specified in the `*_config.py` - # file next to this one.) - self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME], - ) - - # Save a dictionary of cached API call functions. - # These are the actual callables which invoke the proper - # transport methods, wrapped with `wrap_method` to add retry, - # timeout, and the like. - self._inner_api_calls = {} - - # Service calls - def create_table( - self, - parent, - table_id, - table, - initial_splits=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates a new table in the specified instance. - The table can be created with a full set of initial column families, - specified in the request. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> # TODO: Initialize `table_id`: - >>> table_id = '' - >>> - >>> # TODO: Initialize `table`: - >>> table = {} - >>> - >>> response = client.create_table(parent, table_id, table) - - Args: - parent (str): Required. The unique name of the instance in which to create the - table. Values are of the form - ``projects/{project}/instances/{instance}``. - table_id (str): Required. The name by which the new table should be referred to - within the parent instance, e.g., ``foobar`` rather than - ``{parent}/tables/foobar``. Maximum 50 characters. - table (Union[dict, ~google.cloud.bigtable_admin_v2.types.Table]): Required. The Table to create. 
- - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Table` - initial_splits (list[Union[dict, ~google.cloud.bigtable_admin_v2.types.Split]]): The optional list of row keys that will be used to initially split - the table into several tablets (tablets are similar to HBase regions). - Given two split keys, ``s1`` and ``s2``, three tablets will be created, - spanning the key ranges: ``[, s1), [s1, s2), [s2, )``. - - Example: - - - Row keys := ``["a", "apple", "custom", "customer_1", "customer_2",`` - ``"other", "zz"]`` - - initial_split_keys := - ``["apple", "customer_1", "customer_2", "other"]`` - - Key assignment: - - - Tablet 1 ``[, apple) => {"a"}.`` - - Tablet 2 ``[apple, customer_1) => {"apple", "custom"}.`` - - Tablet 3 ``[customer_1, customer_2) => {"customer_1"}.`` - - Tablet 4 ``[customer_2, other) => {"customer_2"}.`` - - Tablet 5 ``[other, ) => {"other", "zz"}.`` - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Split` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Table` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "create_table" not in self._inner_api_calls: - self._inner_api_calls[ - "create_table" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_table, - default_retry=self._method_configs["CreateTable"].retry, - default_timeout=self._method_configs["CreateTable"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.CreateTableRequest( - parent=parent, - table_id=table_id, - table=table, - initial_splits=initial_splits, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["create_table"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def create_table_from_snapshot( - self, - parent, - table_id, - source_snapshot, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates a new table from the specified snapshot. The target table must - not exist. The snapshot and the table must be in the same instance. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. 
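# Editor's note: a sketch of the initial_splits worked example from the
# create_table docstring above; Split messages are passed as dicts with a
# single bytes `key` field (identifiers illustrative):
parent = client.instance_path("my-project", "my-instance")
table = client.create_table(
    parent,
    "my-table",
    {},  # an empty Table message; column families can be added later
    initial_splits=[
        {"key": b"apple"},
        {"key": b"customer_1"},
        {"key": b"customer_2"},
        {"key": b"other"},
    ],
)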
- - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> # TODO: Initialize `table_id`: - >>> table_id = '' - >>> source_snapshot = client.snapshot_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]', '[SNAPSHOT]') - >>> - >>> response = client.create_table_from_snapshot(parent, table_id, source_snapshot) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - parent (str): Required. The unique name of the instance in which to create the - table. Values are of the form - ``projects/{project}/instances/{instance}``. - table_id (str): Required. The name by which the new table should be referred to - within the parent instance, e.g., ``foobar`` rather than - ``{parent}/tables/foobar``. - source_snapshot (str): Required. The unique name of the snapshot from which to restore the - table. The snapshot and the table must be in the same instance. Values - are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "create_table_from_snapshot" not in self._inner_api_calls: - self._inner_api_calls[ - "create_table_from_snapshot" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_table_from_snapshot, - default_retry=self._method_configs["CreateTableFromSnapshot"].retry, - default_timeout=self._method_configs["CreateTableFromSnapshot"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.CreateTableFromSnapshotRequest( - parent=parent, - table_id=table_id, - source_snapshot=source_snapshot, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["create_table_from_snapshot"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - table_pb2.Table, - metadata_type=bigtable_table_admin_pb2.CreateTableFromSnapshotMetadata, - ) - - def list_tables( - self, - parent, - view=None, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists all tables served from a specified instance. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> # Iterate over all results - >>> for element in client.list_tables(parent): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_tables(parent).pages: - ... for element in page: - ... # process element - ... pass - - Args: - parent (str): Required. The unique name of the instance for which tables should be - listed. Values are of the form - ``projects/{project}/instances/{instance}``. - view (~google.cloud.bigtable_admin_v2.types.View): The view to be applied to the returned tables' fields. Only - NAME_ONLY view (default) and REPLICATION_VIEW are supported. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.bigtable_admin_v2.types.Table` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. 
- ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "list_tables" not in self._inner_api_calls: - self._inner_api_calls[ - "list_tables" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_tables, - default_retry=self._method_configs["ListTables"].retry, - default_timeout=self._method_configs["ListTables"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.ListTablesRequest( - parent=parent, - view=view, - page_size=page_size, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_tables"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="tables", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def get_table( - self, - name, - view=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets metadata information about the specified table. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> response = client.get_table(name) - - Args: - name (str): Required. The unique name of the requested table. Values are of the - form ``projects/{project}/instances/{instance}/tables/{table}``. - view (~google.cloud.bigtable_admin_v2.types.View): The view to be applied to the returned table's fields. Defaults to - ``SCHEMA_VIEW`` if unspecified. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Table` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "get_table" not in self._inner_api_calls: - self._inner_api_calls[ - "get_table" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_table, - default_retry=self._method_configs["GetTable"].retry, - default_timeout=self._method_configs["GetTable"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.GetTableRequest( - name=name, - view=view, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_table"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def delete_table( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Permanently deletes a specified table and all of its data. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> client.delete_table(name) - - Args: - name (str): Required. The unique name of the table to be deleted. Values are of - the form ``projects/{project}/instances/{instance}/tables/{table}``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "delete_table" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_table" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_table, - default_retry=self._method_configs["DeleteTable"].retry, - default_timeout=self._method_configs["DeleteTable"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.DeleteTableRequest( - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["delete_table"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def modify_column_families( - self, - name, - modifications, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Performs a series of column family modifications on the specified table. - Either all or none of the modifications will occur before this method - returns, but data requests received prior to that point may see a table - where only some modifications have taken effect. 
- - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> # TODO: Initialize `modifications`: - >>> modifications = [] - >>> - >>> response = client.modify_column_families(name, modifications) - - Args: - name (str): Required. The unique name of the table whose families should be - modified. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - modifications (list[Union[dict, ~google.cloud.bigtable_admin_v2.types.Modification]]): Required. Modifications to be atomically applied to the specified table's - families. Entries are applied in order, meaning that earlier modifications - can be masked by later ones (in the case of repeated updates to the same - family, for example). - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Modification` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Table` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "modify_column_families" not in self._inner_api_calls: - self._inner_api_calls[ - "modify_column_families" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.modify_column_families, - default_retry=self._method_configs["ModifyColumnFamilies"].retry, - default_timeout=self._method_configs["ModifyColumnFamilies"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.ModifyColumnFamiliesRequest( - name=name, - modifications=modifications, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["modify_column_families"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def drop_row_range( - self, - name, - row_key_prefix=None, - delete_all_data_from_table=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Permanently drop/delete a row range from a specified table. The request can - specify whether to delete all rows in a table, or only those that match a - particular prefix. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> client.drop_row_range(name) - - Args: - name (str): Required. The unique name of the table on which to drop a range of - rows. 
Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - row_key_prefix (bytes): Delete all rows that start with this row key prefix. Prefix cannot be - zero length. - delete_all_data_from_table (bool): Delete all rows in the table. Setting this to false is a no-op. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "drop_row_range" not in self._inner_api_calls: - self._inner_api_calls[ - "drop_row_range" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.drop_row_range, - default_retry=self._method_configs["DropRowRange"].retry, - default_timeout=self._method_configs["DropRowRange"].timeout, - client_info=self._client_info, - ) - - # Sanity check: We have some fields which are mutually exclusive; - # raise ValueError if more than one is sent. - google.api_core.protobuf_helpers.check_oneof( - row_key_prefix=row_key_prefix, - delete_all_data_from_table=delete_all_data_from_table, - ) - - request = bigtable_table_admin_pb2.DropRowRangeRequest( - name=name, - row_key_prefix=row_key_prefix, - delete_all_data_from_table=delete_all_data_from_table, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["drop_row_range"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def generate_consistency_token( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Generates a consistency token for a Table, which can be used in - CheckConsistency to check whether mutations to the table that finished - before this call started have been replicated. The tokens will be available - for 90 days. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> response = client.generate_consistency_token(name) - - Args: - name (str): Required. The unique name of the Table for which to create a - consistency token. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. 
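# Editor's note: check_oneof above makes drop_row_range's two selectors
# mutually exclusive. A sketch (table name illustrative):
name = client.table_path("my-project", "my-instance", "my-table")
client.drop_row_range(name, row_key_prefix=b"user#")          # drop one prefix
client.drop_row_range(name, delete_all_data_from_table=True)  # or every row
# Passing both raises ValueError from protobuf_helpers.check_oneof before any
# RPC is made.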
- metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "generate_consistency_token" not in self._inner_api_calls: - self._inner_api_calls[ - "generate_consistency_token" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.generate_consistency_token, - default_retry=self._method_configs["GenerateConsistencyToken"].retry, - default_timeout=self._method_configs[ - "GenerateConsistencyToken" - ].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.GenerateConsistencyTokenRequest( - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["generate_consistency_token"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def check_consistency( - self, - name, - consistency_token, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Checks replication consistency based on a consistency token, that is, if - replication has caught up based on the conditions specified in the token - and the check request. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> # TODO: Initialize `consistency_token`: - >>> consistency_token = '' - >>> - >>> response = client.check_consistency(name, consistency_token) - - Args: - name (str): Required. The unique name of the Table for which to check - replication consistency. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - consistency_token (str): Required. The token created using GenerateConsistencyToken for the Table. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.CheckConsistencyResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
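# Editor's note: the two methods above pair into a replication barrier. A
# polling sketch (the interval is illustrative; tokens stay valid for 90 days
# per the docstring above):
import time

name = client.table_path("my-project", "my-instance", "my-table")
token = client.generate_consistency_token(name).consistency_token
while not client.check_consistency(name, token).consistent:
    time.sleep(1)  # replication has not caught up yet; poll again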
- if "check_consistency" not in self._inner_api_calls: - self._inner_api_calls[ - "check_consistency" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.check_consistency, - default_retry=self._method_configs["CheckConsistency"].retry, - default_timeout=self._method_configs["CheckConsistency"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.CheckConsistencyRequest( - name=name, - consistency_token=consistency_token, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["check_consistency"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def get_iam_policy( - self, - resource, - options_=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets the access control policy for a resource. - Returns an empty policy if the resource exists but does not have a policy - set. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> # TODO: Initialize `resource`: - >>> resource = '' - >>> - >>> response = client.get_iam_policy(resource) - - Args: - resource (str): REQUIRED: The resource for which the policy is being requested. - See the operation documentation for the appropriate value for this field. - options_ (Union[dict, ~google.cloud.bigtable_admin_v2.types.GetPolicyOptions]): OPTIONAL: A ``GetPolicyOptions`` object for specifying options to - ``GetIamPolicy``. This field is only used by Cloud IAM. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.GetPolicyOptions` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Policy` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "get_iam_policy" not in self._inner_api_calls: - self._inner_api_calls[ - "get_iam_policy" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_iam_policy, - default_retry=self._method_configs["GetIamPolicy"].retry, - default_timeout=self._method_configs["GetIamPolicy"].timeout, - client_info=self._client_info, - ) - - request = iam_policy_pb2.GetIamPolicyRequest( - resource=resource, - options=options_, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("resource", resource)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_iam_policy"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def set_iam_policy( - self, - resource, - policy, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Sets the access control policy on a Table or Backup resource. - Replaces any existing policy. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> # TODO: Initialize `resource`: - >>> resource = '' - >>> - >>> # TODO: Initialize `policy`: - >>> policy = {} - >>> - >>> response = client.set_iam_policy(resource, policy) - - Args: - resource (str): REQUIRED: The resource for which the policy is being specified. - See the operation documentation for the appropriate value for this field. - policy (Union[dict, ~google.cloud.bigtable_admin_v2.types.Policy]): REQUIRED: The complete policy to be applied to the ``resource``. The - size of the policy is limited to a few 10s of KB. An empty policy is a - valid policy but certain Cloud Platform services (such as Projects) - might reject them. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Policy` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Policy` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "set_iam_policy" not in self._inner_api_calls: - self._inner_api_calls[ - "set_iam_policy" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.set_iam_policy, - default_retry=self._method_configs["SetIamPolicy"].retry, - default_timeout=self._method_configs["SetIamPolicy"].timeout, - client_info=self._client_info, - ) - - request = iam_policy_pb2.SetIamPolicyRequest( - resource=resource, - policy=policy, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("resource", resource)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["set_iam_policy"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def test_iam_permissions( - self, - resource, - permissions, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Returns permissions that the caller has on the specified table resource. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> # TODO: Initialize `resource`: - >>> resource = '' - >>> - >>> # TODO: Initialize `permissions`: - >>> permissions = [] - >>> - >>> response = client.test_iam_permissions(resource, permissions) - - Args: - resource (str): REQUIRED: The resource for which the policy detail is being requested. - See the operation documentation for the appropriate value for this field. - permissions (list[str]): The set of permissions to check for the ``resource``. Permissions - with wildcards (such as '*' or 'storage.*') are not allowed. For more - information see `IAM - Overview `__. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.TestIamPermissionsResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "test_iam_permissions" not in self._inner_api_calls: - self._inner_api_calls[ - "test_iam_permissions" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.test_iam_permissions, - default_retry=self._method_configs["TestIamPermissions"].retry, - default_timeout=self._method_configs["TestIamPermissions"].timeout, - client_info=self._client_info, - ) - - request = iam_policy_pb2.TestIamPermissionsRequest( - resource=resource, - permissions=permissions, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("resource", resource)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["test_iam_permissions"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def snapshot_table( - self, - name, - cluster, - snapshot_id, - ttl=None, - description=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates a new snapshot in the specified cluster from the specified - source table. The cluster and the table must be in the same instance. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> cluster = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') - >>> - >>> # TODO: Initialize `snapshot_id`: - >>> snapshot_id = '' - >>> - >>> response = client.snapshot_table(name, cluster, snapshot_id) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - name (str): Required. The unique name of the table to have the snapshot taken. - Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - cluster (str): Required. The name of the cluster where the snapshot will be created - in. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - snapshot_id (str): Required. The ID by which the new snapshot should be referred to - within the parent cluster, e.g., ``mysnapshot`` of the form: - ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` rather than - ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/mysnapshot``. - ttl (Union[dict, ~google.cloud.bigtable_admin_v2.types.Duration]): The amount of time that the new snapshot can stay active after it is - created. Once 'ttl' expires, the snapshot will get deleted. The maximum - amount of time a snapshot can stay active is 7 days. If 'ttl' is not - specified, the default value of 24 hours will be used. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Duration` - description (str): Description of the snapshot. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. 
If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "snapshot_table" not in self._inner_api_calls: - self._inner_api_calls[ - "snapshot_table" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.snapshot_table, - default_retry=self._method_configs["SnapshotTable"].retry, - default_timeout=self._method_configs["SnapshotTable"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.SnapshotTableRequest( - name=name, - cluster=cluster, - snapshot_id=snapshot_id, - ttl=ttl, - description=description, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["snapshot_table"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - table_pb2.Snapshot, - metadata_type=bigtable_table_admin_pb2.SnapshotTableMetadata, - ) - - def get_snapshot( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets metadata information about the specified snapshot. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> name = client.snapshot_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]', '[SNAPSHOT]') - >>> - >>> response = client.get_snapshot(name) - - Args: - name (str): Required. The unique name of the requested snapshot. Values are of - the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Snapshot` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. 
- google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "get_snapshot" not in self._inner_api_calls: - self._inner_api_calls[ - "get_snapshot" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_snapshot, - default_retry=self._method_configs["GetSnapshot"].retry, - default_timeout=self._method_configs["GetSnapshot"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.GetSnapshotRequest( - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_snapshot"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_snapshots( - self, - parent, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists all snapshots associated with the specified cluster. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> parent = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') - >>> - >>> # Iterate over all results - >>> for element in client.list_snapshots(parent): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_snapshots(parent).pages: - ... for element in page: - ... # process element - ... pass - - Args: - parent (str): Required. The unique name of the cluster for which snapshots should - be listed. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. Use - ``{cluster} = '-'`` to list snapshots for all clusters in an instance, - e.g., ``projects/{project}/instances/{instance}/clusters/-``. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.bigtable_admin_v2.types.Snapshot` instances. - You can also iterate over the pages of the response - using its `pages` property. 
- - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "list_snapshots" not in self._inner_api_calls: - self._inner_api_calls[ - "list_snapshots" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_snapshots, - default_retry=self._method_configs["ListSnapshots"].retry, - default_timeout=self._method_configs["ListSnapshots"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.ListSnapshotsRequest( - parent=parent, - page_size=page_size, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_snapshots"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="snapshots", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def delete_snapshot( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Permanently deletes the specified snapshot. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> name = client.snapshot_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]', '[SNAPSHOT]') - >>> - >>> client.delete_snapshot(name) - - Args: - name (str): Required. The unique name of the snapshot to be deleted. Values are - of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "delete_snapshot" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_snapshot" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_snapshot, - default_retry=self._method_configs["DeleteSnapshot"].retry, - default_timeout=self._method_configs["DeleteSnapshot"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.DeleteSnapshotRequest( - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["delete_snapshot"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def create_backup( - self, - parent, - backup_id, - backup, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Starts creating a new Cloud Bigtable Backup. The returned backup - ``long-running operation`` can be used to track creation of the backup. - The ``metadata`` field type is ``CreateBackupMetadata``. The - ``response`` field type is ``Backup``, if successful. Cancelling the - returned operation will stop the creation and delete the backup. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> parent = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') - >>> - >>> # TODO: Initialize `backup_id`: - >>> backup_id = '' - >>> - >>> # TODO: Initialize `backup`: - >>> backup = {} - >>> - >>> response = client.create_backup(parent, backup_id, backup) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - parent (str): Required. This must be one of the clusters in the instance in which - this table is located. The backup will be stored in this cluster. Values - are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - backup_id (str): Required. The id of the backup to be created. The ``backup_id`` - along with the parent ``parent`` are combined as - {parent}/backups/{backup_id} to create the full backup name, of the - form: - ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``. - This string must be between 1 and 50 characters in length and match the - regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*. - backup (Union[dict, ~google.cloud.bigtable_admin_v2.types.Backup]): Required. The backup to create. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Backup` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. 
- google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "create_backup" not in self._inner_api_calls: - self._inner_api_calls[ - "create_backup" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_backup, - default_retry=self._method_configs["CreateBackup"].retry, - default_timeout=self._method_configs["CreateBackup"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.CreateBackupRequest( - parent=parent, - backup_id=backup_id, - backup=backup, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["create_backup"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - table_pb2.Backup, - metadata_type=bigtable_table_admin_pb2.CreateBackupMetadata, - ) - - def get_backup( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets metadata on a pending or completed Cloud Bigtable Backup. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> name = client.backup_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]', '[BACKUP]') - >>> - >>> response = client.get_backup(name) - - Args: - name (str): Required. Name of the backup. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Backup` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "get_backup" not in self._inner_api_calls: - self._inner_api_calls[ - "get_backup" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_backup, - default_retry=self._method_configs["GetBackup"].retry, - default_timeout=self._method_configs["GetBackup"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.GetBackupRequest( - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_backup"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_backups( - self, - parent, - filter_=None, - order_by=None, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists Cloud Bigtable backups. Returns both completed and pending - backups. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> parent = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') - >>> - >>> # Iterate over all results - >>> for element in client.list_backups(parent): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_backups(parent).pages: - ... for element in page: - ... # process element - ... pass - - Args: - parent (str): Required. The cluster to list backups from. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. Use - ``{cluster} = '-'`` to list backups for all clusters in an instance, - e.g., ``projects/{project}/instances/{instance}/clusters/-``. - filter_ (str): A filter expression that filters backups listed in the response. The - expression must specify the field name, a comparison operator, and the - value that you want to use for filtering. The value must be a string, a - number, or a boolean. The comparison operator must be <, >, <=, >=, !=, - =, or :. Colon ‘:’ represents a HAS operator which is roughly synonymous - with equality. Filter rules are case insensitive. - - The fields eligible for filtering are: - - - ``name`` - - ``source_table`` - - ``state`` - - ``start_time`` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) - - ``end_time`` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) - - ``expire_time`` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) - - ``size_bytes`` - - To filter on multiple expressions, provide each separate expression - within parentheses. By default, each expression is an AND expression. - However, you can include AND, OR, and NOT expressions explicitly. - - Some examples of using filters are: - - - ``name:"exact"`` --> The backup's name is the string "exact". - - ``name:howl`` --> The backup's name contains the string "howl". - - ``source_table:prod`` --> The source_table's name contains the string - "prod". - - ``state:CREATING`` --> The backup is pending creation. - - ``state:READY`` --> The backup is fully created and ready for use. - - ``(name:howl) AND (start_time < \"2018-03-28T14:50:00Z\")`` --> The - backup name contains the string "howl" and start_time of the backup - is before 2018-03-28T14:50:00Z. 
- - ``size_bytes > 10000000000`` --> The backup's size is greater than - 10GB - order_by (str): An expression for specifying the sort order of the results of the - request. The string value should specify one or more fields in - ``Backup``. The full syntax is described at - https://aip.dev/132#ordering. - - Fields supported are: \* name \* source_table \* expire_time \* - start_time \* end_time \* size_bytes \* state - - For example, "start_time". The default sorting order is ascending. To - specify descending order for the field, a suffix " desc" should be - appended to the field name. For example, "start_time desc". Redundant - space characters in the syntax are insignificant. - - If order_by is empty, results will be sorted by ``start_time`` in - descending order starting from the most recently created backup. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.bigtable_admin_v2.types.Backup` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "list_backups" not in self._inner_api_calls: - self._inner_api_calls[ - "list_backups" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_backups, - default_retry=self._method_configs["ListBackups"].retry, - default_timeout=self._method_configs["ListBackups"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.ListBackupsRequest( - parent=parent, - filter=filter_, - order_by=order_by, - page_size=page_size, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_backups"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="backups", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def update_backup( - self, - backup, - update_mask, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates a pending or completed Cloud Bigtable Backup.
- - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> # TODO: Initialize `backup`: - >>> backup = {} - >>> - >>> # TODO: Initialize `update_mask`: - >>> update_mask = {} - >>> - >>> response = client.update_backup(backup, update_mask) - - Args: - backup (Union[dict, ~google.cloud.bigtable_admin_v2.types.Backup]): Required. The backup to update. ``backup.name``, and the fields to - be updated as specified by ``update_mask`` are required. Other fields - are ignored. Update is only supported for the following fields: - - - ``backup.expire_time``. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Backup` - update_mask (Union[dict, ~google.cloud.bigtable_admin_v2.types.FieldMask]): Required. A mask specifying which fields (e.g. ``expire_time``) in - the Backup resource should be updated. This mask is relative to the - Backup resource, not to the request message. The field mask must always - be specified; this prevents any future fields from being erased - accidentally by clients that do not know about them. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.FieldMask` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Backup` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "update_backup" not in self._inner_api_calls: - self._inner_api_calls[ - "update_backup" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_backup, - default_retry=self._method_configs["UpdateBackup"].retry, - default_timeout=self._method_configs["UpdateBackup"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.UpdateBackupRequest( - backup=backup, - update_mask=update_mask, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("backup.name", backup.name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["update_backup"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def delete_backup( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Deletes a pending or completed Cloud Bigtable backup. 
- - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> name = client.backup_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]', '[BACKUP]') - >>> - >>> client.delete_backup(name) - - Args: - name (str): Required. Name of the backup to delete. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "delete_backup" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_backup" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_backup, - default_retry=self._method_configs["DeleteBackup"].retry, - default_timeout=self._method_configs["DeleteBackup"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.DeleteBackupRequest( - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["delete_backup"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def restore_table( - self, - parent=None, - table_id=None, - backup=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Create a new table by restoring from a completed backup. The new - table must be in the same instance as the instance containing the - backup. The returned table ``long-running operation`` can be used to - track the progress of the operation, and to cancel it. The ``metadata`` - field type is ``RestoreTableMetadata``. The ``response`` type is - ``Table``, if successful. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> response = client.restore_table() - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - parent (str): Required. The name of the instance in which to create the restored - table. This instance must be the parent of the source backup. Values are - of the form ``projects/<project>/instances/<instance>``. - table_id (str): Required. The id of the table to create and restore to. This table - must not already exist. The ``table_id`` appended to ``parent`` forms - the full table name of the form - ``projects/<project>/instances/<instance>/tables/<table_id>``. - backup (str): Name of the backup from which to restore.
Values are of the form - ``projects/<project>/instances/<instance>/clusters/<cluster>/backups/<backup>``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "restore_table" not in self._inner_api_calls: - self._inner_api_calls[ - "restore_table" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.restore_table, - default_retry=self._method_configs["RestoreTable"].retry, - default_timeout=self._method_configs["RestoreTable"].timeout, - client_info=self._client_info, - ) - - # Sanity check: We have some fields which are mutually exclusive; - # raise ValueError if more than one is sent. - google.api_core.protobuf_helpers.check_oneof( - backup=backup, - ) - - request = bigtable_table_admin_pb2.RestoreTableRequest( - parent=parent, - table_id=table_id, - backup=backup, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["restore_table"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - table_pb2.Table, - metadata_type=bigtable_table_admin_pb2.RestoreTableMetadata, - ) diff --git a/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py b/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py deleted file mode 100644 index db60047bd..000000000 --- a/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py +++ /dev/null @@ -1,160 +0,0 @@ -config = { - "interfaces": { - "google.bigtable.admin.v2.BigtableTableAdmin": { - "retry_codes": { - "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], - "non_idempotent": [], - }, - "retry_params": { - "idempotent_params": { - "initial_retry_delay_millis": 1000, - "retry_delay_multiplier": 2.0, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 60000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 60000, - "total_timeout_millis": 600000, - }, - "non_idempotent_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 1.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 60000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 60000, - "total_timeout_millis": 60000, - }, - "non_idempotent_heavy_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 1.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 300000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 300000, - "total_timeout_millis": 300000, -
}, - "drop_row_range_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 1.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 3600000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 3600000, - "total_timeout_millis": 3600000, - }, - }, - "methods": { - "CreateTable": { - "timeout_millis": 130000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_heavy_params", - }, - "CreateTableFromSnapshot": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - "ListTables": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "GetTable": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "DeleteTable": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - "ModifyColumnFamilies": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_heavy_params", - }, - "DropRowRange": { - "timeout_millis": 900000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "drop_row_range_params", - }, - "GenerateConsistencyToken": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "CheckConsistency": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "GetIamPolicy": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "SetIamPolicy": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - "TestIamPermissions": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "SnapshotTable": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - "GetSnapshot": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "ListSnapshots": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "DeleteSnapshot": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - "CreateBackup": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - "GetBackup": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "ListBackups": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "UpdateBackup": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - "DeleteBackup": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - "RestoreTable": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - }, - } - } -} diff --git a/google/cloud/bigtable_admin_v2/gapic/transports/__init__.py b/google/cloud/bigtable_admin_v2/gapic/transports/__init__.py deleted file mode 100644 index e69de29bb..000000000 
diff --git a/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py b/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py deleted file mode 100644 index 536629604..000000000 --- a/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py +++ /dev/null @@ -1,380 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import google.api_core.grpc_helpers -import google.api_core.operations_v1 - -from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2_grpc - - -class BigtableInstanceAdminGrpcTransport(object): - """gRPC transport class providing stubs for - google.bigtable.admin.v2 BigtableInstanceAdmin API. - - The transport provides access to the raw gRPC stubs, - which can be used to take advantage of advanced - features of gRPC. - """ - - # The scopes needed to make gRPC calls to all of the methods defined - # in this service. - _OAUTH_SCOPES = ( - "https://www.googleapis.com/auth/bigtable.admin", - "https://www.googleapis.com/auth/bigtable.admin.cluster", - "https://www.googleapis.com/auth/bigtable.admin.instance", - "https://www.googleapis.com/auth/bigtable.admin.table", - "https://www.googleapis.com/auth/cloud-bigtable.admin", - "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster", - "https://www.googleapis.com/auth/cloud-bigtable.admin.table", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - ) - - def __init__( - self, channel=None, credentials=None, address="bigtableadmin.googleapis.com:443" - ): - """Instantiate the transport class. - - Args: - channel (grpc.Channel): A ``Channel`` instance through - which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - address (str): The address where the service is hosted. - """ - # If both `channel` and `credentials` are specified, raise an - # exception (channels come with credentials baked in already). - if channel is not None and credentials is not None: - raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive.", - ) - - # Create the channel. - if channel is None: - channel = self.create_channel( - address=address, - credentials=credentials, - options={ - "grpc.max_send_message_length": -1, - "grpc.max_receive_message_length": -1, - }.items(), - ) - - self._channel = channel - - # gRPC uses objects called "stubs" that are bound to the - # channel and provide a basic method for each RPC. 
- self._stubs = { - "bigtable_instance_admin_stub": bigtable_instance_admin_pb2_grpc.BigtableInstanceAdminStub( - channel - ), - } - - # Because this API includes a method that returns a - # long-running operation (proto: google.longrunning.Operation), - # instantiate an LRO client. - self._operations_client = google.api_core.operations_v1.OperationsClient( - channel - ) - - @classmethod - def create_channel( - cls, address="bigtableadmin.googleapis.com:443", credentials=None, **kwargs - ): - """Create and return a gRPC channel object. - - Args: - address (str): The host for the channel to use. - credentials (~.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - kwargs (dict): Keyword arguments, which are passed to the - channel creation. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return google.api_core.grpc_helpers.create_channel( - address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs - ) - - @property - def channel(self): - """The gRPC channel used by the transport. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return self._channel - - @property - def create_instance(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.create_instance`. - - Create an instance within a project. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].CreateInstance - - @property - def get_instance(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.get_instance`. - - Gets information about an instance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].GetInstance - - @property - def list_instances(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.list_instances`. - - Lists information about instances in a project. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].ListInstances - - @property - def update_instance(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.update_instance`. - - Updates an instance within a project. This method updates only the display - name and type for an Instance. To update other Instance properties, such as - labels, use PartialUpdateInstance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].UpdateInstance - - @property - def partial_update_instance(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.partial_update_instance`. - - Partially updates an instance within a project. This method can modify all - fields of an Instance and is the preferred way to update an Instance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. 
- """ - return self._stubs["bigtable_instance_admin_stub"].PartialUpdateInstance - - @property - def delete_instance(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.delete_instance`. - - Delete an instance from a project. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].DeleteInstance - - @property - def create_cluster(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.create_cluster`. - - Creates a cluster within an instance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].CreateCluster - - @property - def get_cluster(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.get_cluster`. - - Gets information about a cluster. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].GetCluster - - @property - def list_clusters(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.list_clusters`. - - Lists information about clusters in an instance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].ListClusters - - @property - def update_cluster(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.update_cluster`. - - Updates a cluster within an instance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].UpdateCluster - - @property - def delete_cluster(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.delete_cluster`. - - Deletes a cluster from an instance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].DeleteCluster - - @property - def create_app_profile(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.create_app_profile`. - - Creates an app profile within an instance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].CreateAppProfile - - @property - def get_app_profile(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.get_app_profile`. - - Gets information about an app profile. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].GetAppProfile - - @property - def list_app_profiles(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.list_app_profiles`. - - Lists information about app profiles in an instance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. 
- """ - return self._stubs["bigtable_instance_admin_stub"].ListAppProfiles - - @property - def update_app_profile(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.update_app_profile`. - - Updates an app profile within an instance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].UpdateAppProfile - - @property - def delete_app_profile(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.delete_app_profile`. - - Deletes an app profile from an instance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].DeleteAppProfile - - @property - def get_iam_policy(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.get_iam_policy`. - - Gets the access control policy for an instance resource. Returns an empty - policy if an instance exists but does not have a policy set. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].GetIamPolicy - - @property - def set_iam_policy(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.set_iam_policy`. - - Sets the access control policy on an instance resource. Replaces any - existing policy. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].SetIamPolicy - - @property - def test_iam_permissions(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.test_iam_permissions`. - - Returns permissions that the caller has on the specified instance resource. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].TestIamPermissions diff --git a/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py b/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py deleted file mode 100644 index 281bad20a..000000000 --- a/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py +++ /dev/null @@ -1,471 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import google.api_core.grpc_helpers -import google.api_core.operations_v1 - -from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2_grpc - - -class BigtableTableAdminGrpcTransport(object): - """gRPC transport class providing stubs for - google.bigtable.admin.v2 BigtableTableAdmin API. 
- - The transport provides access to the raw gRPC stubs, - which can be used to take advantage of advanced - features of gRPC. - """ - - # The scopes needed to make gRPC calls to all of the methods defined - # in this service. - _OAUTH_SCOPES = ( - "https://www.googleapis.com/auth/bigtable.admin", - "https://www.googleapis.com/auth/bigtable.admin.cluster", - "https://www.googleapis.com/auth/bigtable.admin.instance", - "https://www.googleapis.com/auth/bigtable.admin.table", - "https://www.googleapis.com/auth/cloud-bigtable.admin", - "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster", - "https://www.googleapis.com/auth/cloud-bigtable.admin.table", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - ) - - def __init__( - self, channel=None, credentials=None, address="bigtableadmin.googleapis.com:443" - ): - """Instantiate the transport class. - - Args: - channel (grpc.Channel): A ``Channel`` instance through - which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - address (str): The address where the service is hosted. - """ - # If both `channel` and `credentials` are specified, raise an - # exception (channels come with credentials baked in already). - if channel is not None and credentials is not None: - raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive.", - ) - - # Create the channel. - if channel is None: - channel = self.create_channel( - address=address, - credentials=credentials, - options={ - "grpc.max_send_message_length": -1, - "grpc.max_receive_message_length": -1, - }.items(), - ) - - self._channel = channel - - # gRPC uses objects called "stubs" that are bound to the - # channel and provide a basic method for each RPC. - self._stubs = { - "bigtable_table_admin_stub": bigtable_table_admin_pb2_grpc.BigtableTableAdminStub( - channel - ), - } - - # Because this API includes a method that returns a - # long-running operation (proto: google.longrunning.Operation), - # instantiate an LRO client. - self._operations_client = google.api_core.operations_v1.OperationsClient( - channel - ) - - @classmethod - def create_channel( - cls, address="bigtableadmin.googleapis.com:443", credentials=None, **kwargs - ): - """Create and return a gRPC channel object. - - Args: - address (str): The host for the channel to use. - credentials (~.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - kwargs (dict): Keyword arguments, which are passed to the - channel creation. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return google.api_core.grpc_helpers.create_channel( - address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs - ) - - @property - def channel(self): - """The gRPC channel used by the transport. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return self._channel - - @property - def create_table(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.create_table`. - - Creates a new table in the specified instance. 
- The table can be created with a full set of initial column families, - specified in the request. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].CreateTable - - @property - def create_table_from_snapshot(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.create_table_from_snapshot`. - - Creates a new table from the specified snapshot. The target table must - not exist. The snapshot and the table must be in the same instance. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].CreateTableFromSnapshot - - @property - def list_tables(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.list_tables`. - - Lists all tables served from a specified instance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].ListTables - - @property - def get_table(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.get_table`. - - Gets metadata information about the specified table. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].GetTable - - @property - def delete_table(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.delete_table`. - - Permanently deletes a specified table and all of its data. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].DeleteTable - - @property - def modify_column_families(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.modify_column_families`. - - Performs a series of column family modifications on the specified table. - Either all or none of the modifications will occur before this method - returns, but data requests received prior to that point may see a table - where only some modifications have taken effect. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].ModifyColumnFamilies - - @property - def drop_row_range(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.drop_row_range`. - - Permanently drop/delete a row range from a specified table. The request can - specify whether to delete all rows in a table, or only those that match a - particular prefix. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. 
- """ - return self._stubs["bigtable_table_admin_stub"].DropRowRange - - @property - def generate_consistency_token(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.generate_consistency_token`. - - Generates a consistency token for a Table, which can be used in - CheckConsistency to check whether mutations to the table that finished - before this call started have been replicated. The tokens will be available - for 90 days. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].GenerateConsistencyToken - - @property - def check_consistency(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.check_consistency`. - - Checks replication consistency based on a consistency token, that is, if - replication has caught up based on the conditions specified in the token - and the check request. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].CheckConsistency - - @property - def get_iam_policy(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.get_iam_policy`. - - Gets the access control policy for a resource. - Returns an empty policy if the resource exists but does not have a policy - set. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].GetIamPolicy - - @property - def set_iam_policy(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.set_iam_policy`. - - Sets the access control policy on a Table or Backup resource. - Replaces any existing policy. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].SetIamPolicy - - @property - def test_iam_permissions(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.test_iam_permissions`. - - Returns permissions that the caller has on the specified table resource. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].TestIamPermissions - - @property - def snapshot_table(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.snapshot_table`. - - Creates a new snapshot in the specified cluster from the specified - source table. The cluster and the table must be in the same instance. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].SnapshotTable - - @property - def get_snapshot(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.get_snapshot`. - - Gets metadata information about the specified snapshot. - - Note: This is a private alpha release of Cloud Bigtable snapshots. 
This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].GetSnapshot - - @property - def list_snapshots(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.list_snapshots`. - - Lists all snapshots associated with the specified cluster. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].ListSnapshots - - @property - def delete_snapshot(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.delete_snapshot`. - - Permanently deletes the specified snapshot. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].DeleteSnapshot - - @property - def create_backup(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.create_backup`. - - Starts creating a new Cloud Bigtable Backup. The returned backup - ``long-running operation`` can be used to track creation of the backup. - The ``metadata`` field type is ``CreateBackupMetadata``. The - ``response`` field type is ``Backup``, if successful. Cancelling the - returned operation will stop the creation and delete the backup. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].CreateBackup - - @property - def get_backup(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.get_backup`. - - Gets metadata on a pending or completed Cloud Bigtable Backup. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].GetBackup - - @property - def list_backups(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.list_backups`. - - Lists Cloud Bigtable backups. Returns both completed and pending - backups. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].ListBackups - - @property - def update_backup(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.update_backup`. - - Updates a pending or completed Cloud Bigtable Backup. 
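Because create_backup above starts a long-running operation, callers track it to completion; on the regenerated client the method returns an operation future whose result() is the finished Backup. A minimal sketch with placeholder resource names, assuming Backup is re-exported at the package root (otherwise it lives under bigtable_admin_v2.types):

import datetime

from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableTableAdminClient()

operation = client.create_backup(
    parent="projects/my-project/instances/my-instance/clusters/my-cluster",
    backup_id="my-backup",
    backup=bigtable_admin_v2.Backup(
        source_table="projects/my-project/instances/my-instance/tables/my-table",
        # The service requires an expiration; one week out as an example.
        expire_time=datetime.datetime.now(datetime.timezone.utc)
        + datetime.timedelta(days=7),
    ),
)
backup = operation.result()  # blocks until the backup finishes creating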
- - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].UpdateBackup - - @property - def delete_backup(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.delete_backup`. - - Deletes a pending or completed Cloud Bigtable backup. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].DeleteBackup - - @property - def restore_table(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.restore_table`. - - Create a new table by restoring from a completed backup. The new - table must be in the same instance as the instance containing the - backup. The returned table ``long-running operation`` can be used to - track the progress of the operation, and to cancel it. The ``metadata`` - field type is ``RestoreTableMetadata``. The ``response`` type is - ``Table``, if successful. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].RestoreTable diff --git a/google/cloud/bigtable_admin_v2/proto/__init__.py b/google/cloud/bigtable_admin_v2/proto/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/google/cloud/bigtable_admin_v2/proto/bigtable_cluster_data.proto b/google/cloud/bigtable_admin_v2/proto/bigtable_cluster_data.proto deleted file mode 100644 index ca3b663d8..000000000 --- a/google/cloud/bigtable_admin_v2/proto/bigtable_cluster_data.proto +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2017 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.cluster.v1; - -import "google/api/annotations.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/timestamp.proto"; - -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/cluster/v1;cluster"; -option java_multiple_files = true; -option java_outer_classname = "BigtableClusterDataProto"; -option java_package = "com.google.bigtable.admin.cluster.v1"; - - -// A physical location in which a particular project can allocate Cloud BigTable -// resources. -message Zone { - // Possible states of a zone. - enum Status { - // The state of the zone is unknown or unspecified. - UNKNOWN = 0; - - // The zone is in a good state. - OK = 1; - - // The zone is down for planned maintenance. - PLANNED_MAINTENANCE = 2; - - // The zone is down for emergency or unplanned maintenance. - EMERGENCY_MAINENANCE = 3; - } - - // A permanent unique identifier for the zone. - // Values are of the form projects/<project>/zones/[a-z][-a-z0-9]* - string name = 1; - - // The name of this zone as it appears in UIs. - string display_name = 2; - - // The current state of this zone.
- Status status = 3; -} - -// An isolated set of Cloud BigTable resources on which tables can be hosted. -message Cluster { - // A permanent unique identifier for the cluster. For technical reasons, the - // zone in which the cluster resides is included here. - // Values are of the form - // projects/<project>/zones/<zone>/clusters/[a-z][-a-z0-9]* - string name = 1; - - // The operation currently running on the cluster, if any. - // This cannot be set directly, only through CreateCluster, UpdateCluster, - // or UndeleteCluster. Calls to these methods will be rejected if - // "current_operation" is already set. - google.longrunning.Operation current_operation = 3; - - // The descriptive name for this cluster as it appears in UIs. - // Must be unique per zone. - string display_name = 4; - - // The number of serve nodes allocated to this cluster. - int32 serve_nodes = 5; - - // What storage type to use for tables in this cluster. Only configurable at - // cluster creation time. If unspecified, STORAGE_SSD will be used. - StorageType default_storage_type = 8; -} - -enum StorageType { - // The storage type used is unspecified. - STORAGE_UNSPECIFIED = 0; - - // Data will be stored in SSD, providing low and consistent latencies. - STORAGE_SSD = 1; - - // Data will be stored in HDD, providing high and less predictable - // latencies. - STORAGE_HDD = 2; -} diff --git a/google/cloud/bigtable_admin_v2/proto/bigtable_cluster_service.proto b/google/cloud/bigtable_admin_v2/proto/bigtable_cluster_service.proto deleted file mode 100644 index 038fcc463..000000000 --- a/google/cloud/bigtable_admin_v2/proto/bigtable_cluster_service.proto +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2017 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.cluster.v1; - -import "google/api/annotations.proto"; -import "google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto"; -import "google/bigtable/admin/cluster/v1/bigtable_cluster_service_messages.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/empty.proto"; - -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/cluster/v1;cluster"; -option java_multiple_files = true; -option java_outer_classname = "BigtableClusterServicesProto"; -option java_package = "com.google.bigtable.admin.cluster.v1"; - - -// Service for managing zonal Cloud Bigtable resources. -service BigtableClusterService { - // Lists the supported zones for the given project. - rpc ListZones(ListZonesRequest) returns (ListZonesResponse) { - option (google.api.http) = { get: "/v1/{name=projects/*}/zones" }; - } - - // Gets information about a particular cluster. - rpc GetCluster(GetClusterRequest) returns (Cluster) { - option (google.api.http) = { get: "/v1/{name=projects/*/zones/*/clusters/*}" }; - } - - // Lists all clusters in the given project, along with any zones for which - // cluster information could not be retrieved.
- rpc ListClusters(ListClustersRequest) returns (ListClustersResponse) { - option (google.api.http) = { get: "/v1/{name=projects/*}/aggregated/clusters" }; - } - - // Creates a cluster and begins preparing it to begin serving. The returned - // cluster embeds as its "current_operation" a long-running operation which - // can be used to track the progress of turning up the new cluster. - // Immediately upon completion of this request: - // * The cluster will be readable via the API, with all requested attributes - // but no allocated resources. - // Until completion of the embedded operation: - // * Cancelling the operation will render the cluster immediately unreadable - // via the API. - // * All other attempts to modify or delete the cluster will be rejected. - // Upon completion of the embedded operation: - // * Billing for all successfully-allocated resources will begin (some types - // may have lower than the requested levels). - // * New tables can be created in the cluster. - // * The cluster's allocated resource levels will be readable via the API. - // The embedded operation's "metadata" field type is - // [CreateClusterMetadata][google.bigtable.admin.cluster.v1.CreateClusterMetadata] The embedded operation's "response" field type is - // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. - rpc CreateCluster(CreateClusterRequest) returns (Cluster) { - option (google.api.http) = { post: "/v1/{name=projects/*/zones/*}/clusters" body: "*" }; - } - - // Updates a cluster, and begins allocating or releasing resources as - // requested. The returned cluster embeds as its "current_operation" a - // long-running operation which can be used to track the progress of updating - // the cluster. - // Immediately upon completion of this request: - // * For resource types where a decrease in the cluster's allocation has been - // requested, billing will be based on the newly-requested level. - // Until completion of the embedded operation: - // * Cancelling the operation will set its metadata's "cancelled_at_time", - // and begin restoring resources to their pre-request values. The operation - // is guaranteed to succeed at undoing all resource changes, after which - // point it will terminate with a CANCELLED status. - // * All other attempts to modify or delete the cluster will be rejected. - // * Reading the cluster via the API will continue to give the pre-request - // resource levels. - // Upon completion of the embedded operation: - // * Billing will begin for all successfully-allocated resources (some types - // may have lower than the requested levels). - // * All newly-reserved resources will be available for serving the cluster's - // tables. - // * The cluster's new resource levels will be readable via the API. - // The embedded operation's "metadata" field type is - // [UpdateClusterMetadata][google.bigtable.admin.cluster.v1.UpdateClusterMetadata] The embedded operation's "response" field type is - // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. - rpc UpdateCluster(Cluster) returns (Cluster) { - option (google.api.http) = { put: "/v1/{name=projects/*/zones/*/clusters/*}" body: "*" }; - } - - // Marks a cluster and all of its tables for permanent deletion in 7 days. - // Immediately upon completion of the request: - // * Billing will cease for all of the cluster's reserved resources. - // * The cluster's "delete_time" field will be set 7 days in the future. - // Soon afterward: - // * All tables within the cluster will become unavailable.
- // Prior to the cluster's "delete_time": - // * The cluster can be recovered with a call to UndeleteCluster. - // * All other attempts to modify or delete the cluster will be rejected. - // At the cluster's "delete_time": - // * The cluster and *all of its tables* will immediately and irrevocably - // disappear from the API, and their data will be permanently deleted. - rpc DeleteCluster(DeleteClusterRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { delete: "/v1/{name=projects/*/zones/*/clusters/*}" }; - } - - // Cancels the scheduled deletion of a cluster and begins preparing it to - // resume serving. The returned operation will also be embedded as the - // cluster's "current_operation". - // Immediately upon completion of this request: - // * The cluster's "delete_time" field will be unset, protecting it from - // automatic deletion. - // Until completion of the returned operation: - // * The operation cannot be cancelled. - // Upon completion of the returned operation: - // * Billing for the cluster's resources will resume. - // * All tables within the cluster will be available. - // The embedded operation's "metadata" field type is - // [UndeleteClusterMetadata][google.bigtable.admin.cluster.v1.UndeleteClusterMetadata] The embedded operation's "response" field type is - // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. - rpc UndeleteCluster(UndeleteClusterRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*}:undelete" body: "" }; - } -} diff --git a/google/cloud/bigtable_admin_v2/proto/bigtable_cluster_service_messages.proto b/google/cloud/bigtable_admin_v2/proto/bigtable_cluster_service_messages.proto deleted file mode 100644 index 518d14dac..000000000 --- a/google/cloud/bigtable_admin_v2/proto/bigtable_cluster_service_messages.proto +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2017 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.cluster.v1; - -import "google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto"; -import "google/protobuf/timestamp.proto"; - -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/cluster/v1;cluster"; -option java_multiple_files = true; -option java_outer_classname = "BigtableClusterServiceMessagesProto"; -option java_package = "com.google.bigtable.admin.cluster.v1"; - - -// Request message for BigtableClusterService.ListZones. -message ListZonesRequest { - // The unique name of the project for which a list of supported zones is - // requested. - // Values are of the form projects/<project> - string name = 1; -} - -// Response message for BigtableClusterService.ListZones. -message ListZonesResponse { - // The list of requested zones. - repeated Zone zones = 1; -} - -// Request message for BigtableClusterService.GetCluster. -message GetClusterRequest { - // The unique name of the requested cluster.
- // Values are of the form projects/<project>/zones/<zone>/clusters/<cluster> - string name = 1; -} - -// Request message for BigtableClusterService.ListClusters. -message ListClustersRequest { - // The unique name of the project for which a list of clusters is requested. - // Values are of the form projects/<project> - string name = 1; -} - -// Response message for BigtableClusterService.ListClusters. -message ListClustersResponse { - // The list of requested Clusters. - repeated Cluster clusters = 1; - - // The zones for which clusters could not be retrieved. - repeated Zone failed_zones = 2; -} - -// Request message for BigtableClusterService.CreateCluster. -message CreateClusterRequest { - // The unique name of the zone in which to create the cluster. - // Values are of the form projects/<project>/zones/<zone> - string name = 1; - - // The id to be used when referring to the new cluster within its zone, - // e.g. just the "test-cluster" section of the full name - // "projects/<project>/zones/<zone>/clusters/test-cluster". - string cluster_id = 2; - - // The cluster to create. - // The "name", "delete_time", and "current_operation" fields must be left - // blank. - Cluster cluster = 3; -} - -// Metadata type for the operation returned by -// BigtableClusterService.CreateCluster. -message CreateClusterMetadata { - // The request which prompted the creation of this operation. - CreateClusterRequest original_request = 1; - - // The time at which original_request was received. - google.protobuf.Timestamp request_time = 2; - - // The time at which this operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 3; -} - -// Metadata type for the operation returned by -// BigtableClusterService.UpdateCluster. -message UpdateClusterMetadata { - // The request which prompted the creation of this operation. - Cluster original_request = 1; - - // The time at which original_request was received. - google.protobuf.Timestamp request_time = 2; - - // The time at which this operation was cancelled. If set, this operation is - // in the process of undoing itself (which is guaranteed to succeed) and - // cannot be cancelled again. - google.protobuf.Timestamp cancel_time = 3; - - // The time at which this operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 4; -} - -// Request message for BigtableClusterService.DeleteCluster. -message DeleteClusterRequest { - // The unique name of the cluster to be deleted. - // Values are of the form projects/<project>/zones/<zone>/clusters/<cluster> - string name = 1; -} - -// Request message for BigtableClusterService.UndeleteCluster. -message UndeleteClusterRequest { - // The unique name of the cluster to be un-deleted. - // Values are of the form projects/<project>/zones/<zone>/clusters/<cluster> - string name = 1; -} - -// Metadata type for the operation returned by -// BigtableClusterService.UndeleteCluster. -message UndeleteClusterMetadata { - // The time at which the original request was received. - google.protobuf.Timestamp request_time = 1; - - // The time at which this operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 2; -} - -// Metadata type for operations initiated by the V2 BigtableAdmin service. -// More complete information for such operations is available via the V2 API.
-message V2OperationMetadata { - -} diff --git a/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto b/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto index 8b19b5582..ca3aaed7a 100644 --- a/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto +++ b/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; @@ -565,9 +564,11 @@ message DeleteAppProfileRequest { } ]; - // If true, ignore safety checks when deleting the app profile. - bool ignore_warnings = 2; + // Required. If true, ignore safety checks when deleting the app profile. + bool ignore_warnings = 2 [(google.api.field_behavior) = REQUIRED]; } // The metadata for the Operation returned by UpdateAppProfile. -message UpdateAppProfileMetadata {} +message UpdateAppProfileMetadata { + +} diff --git a/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py b/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py deleted file mode 100644 index 38fe53f88..000000000 --- a/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py +++ /dev/null @@ -1,2434 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto -"""Generated protocol buffer code.""" -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.api import client_pb2 as google_dot_api_dot_client__pb2 -from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.cloud.bigtable_admin_v2.proto import ( - instance_pb2 as google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2, -) -from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2 -from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2 -from google.longrunning import ( - operations_pb2 as google_dot_longrunning_dot_operations__pb2, -) -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 -from google.protobuf import field_mask_pb2 as google_dot_protobuf_dot_field__mask__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto", - package="google.bigtable.admin.v2", - syntax="proto3", - serialized_options=b'\n\034com.google.bigtable.admin.v2B\032BigtableInstanceAdminProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2\352\002"Google::Cloud::Bigtable::Admin::V2', - create_key=_descriptor._internal_create_key, - 
serialized_pb=b'\nBgoogle/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x33google/cloud/bigtable_admin_v2/proto/instance.proto\x1a\x1egoogle/iam/v1/iam_policy.proto\x1a\x1agoogle/iam/v1/policy.proto\x1a#google/longrunning/operations.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xdb\x02\n\x15\x43reateInstanceRequest\x12\x43\n\x06parent\x18\x01 \x01(\tB3\xe0\x41\x02\xfa\x41-\n+cloudresourcemanager.googleapis.com/Project\x12\x18\n\x0binstance_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x39\n\x08instance\x18\x03 \x01(\x0b\x32".google.bigtable.admin.v2.InstanceB\x03\xe0\x41\x02\x12T\n\x08\x63lusters\x18\x04 \x03(\x0b\x32=.google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntryB\x03\xe0\x41\x02\x1aR\n\rClustersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x30\n\x05value\x18\x02 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster:\x02\x38\x01"L\n\x12GetInstanceRequest\x12\x36\n\x04name\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance"o\n\x14ListInstancesRequest\x12\x43\n\x06parent\x18\x01 \x01(\tB3\xe0\x41\x02\xfa\x41-\n+cloudresourcemanager.googleapis.com/Project\x12\x12\n\npage_token\x18\x02 \x01(\t"\x81\x01\n\x15ListInstancesResponse\x12\x35\n\tinstances\x18\x01 \x03(\x0b\x32".google.bigtable.admin.v2.Instance\x12\x18\n\x10\x66\x61iled_locations\x18\x02 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t"\x8f\x01\n\x1cPartialUpdateInstanceRequest\x12\x39\n\x08instance\x18\x01 \x01(\x0b\x32".google.bigtable.admin.v2.InstanceB\x03\xe0\x41\x02\x12\x34\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02"O\n\x15\x44\x65leteInstanceRequest\x12\x36\n\x04name\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance"\xa2\x01\n\x14\x43reateClusterRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x17\n\ncluster_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x37\n\x07\x63luster\x18\x03 \x01(\x0b\x32!.google.bigtable.admin.v2.ClusterB\x03\xe0\x41\x02"J\n\x11GetClusterRequest\x12\x35\n\x04name\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster"c\n\x13ListClustersRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x12\n\npage_token\x18\x02 \x01(\t"~\n\x14ListClustersResponse\x12\x33\n\x08\x63lusters\x18\x01 \x03(\x0b\x32!.google.bigtable.admin.v2.Cluster\x12\x18\n\x10\x66\x61iled_locations\x18\x02 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t"M\n\x14\x44\x65leteClusterRequest\x12\x35\n\x04name\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster"\xc6\x01\n\x16\x43reateInstanceMetadata\x12I\n\x10original_request\x18\x01 \x01(\x0b\x32/.google.bigtable.admin.v2.CreateInstanceRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xcd\x01\n\x16UpdateInstanceMetadata\x12P\n\x10original_request\x18\x01 \x01(\x0b\x32\x36.google.bigtable.admin.v2.PartialUpdateInstanceRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xc4\x01\n\x15\x43reateClusterMetadata\x12H\n\x10original_request\x18\x01 
\x01(\x0b\x32..google.bigtable.admin.v2.CreateClusterRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xb7\x01\n\x15UpdateClusterMetadata\x12;\n\x10original_request\x18\x01 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xc9\x01\n\x17\x43reateAppProfileRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x1b\n\x0e\x61pp_profile_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12>\n\x0b\x61pp_profile\x18\x03 \x01(\x0b\x32$.google.bigtable.admin.v2.AppProfileB\x03\xe0\x41\x02\x12\x17\n\x0fignore_warnings\x18\x04 \x01(\x08"P\n\x14GetAppProfileRequest\x12\x38\n\x04name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n"bigtable.googleapis.com/AppProfile"y\n\x16ListAppProfilesRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x11\n\tpage_size\x18\x03 \x01(\x05\x12\x12\n\npage_token\x18\x02 \x01(\t"\x88\x01\n\x17ListAppProfilesResponse\x12:\n\x0c\x61pp_profiles\x18\x01 \x03(\x0b\x32$.google.bigtable.admin.v2.AppProfile\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\x12\x18\n\x10\x66\x61iled_locations\x18\x03 \x03(\t"\xa8\x01\n\x17UpdateAppProfileRequest\x12>\n\x0b\x61pp_profile\x18\x01 \x01(\x0b\x32$.google.bigtable.admin.v2.AppProfileB\x03\xe0\x41\x02\x12\x34\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02\x12\x17\n\x0fignore_warnings\x18\x03 \x01(\x08"l\n\x17\x44\x65leteAppProfileRequest\x12\x38\n\x04name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n"bigtable.googleapis.com/AppProfile\x12\x17\n\x0fignore_warnings\x18\x02 \x01(\x08"\x1a\n\x18UpdateAppProfileMetadata2\x92\x1e\n\x15\x42igtableInstanceAdmin\x12\xda\x01\n\x0e\x43reateInstance\x12/.google.bigtable.admin.v2.CreateInstanceRequest\x1a\x1d.google.longrunning.Operation"x\x82\xd3\xe4\x93\x02&"!/v2/{parent=projects/*}/instances:\x01*\xda\x41$parent,instance_id,instance,clusters\xca\x41"\n\x08Instance\x12\x16\x43reateInstanceMetadata\x12\x91\x01\n\x0bGetInstance\x12,.google.bigtable.admin.v2.GetInstanceRequest\x1a".google.bigtable.admin.v2.Instance"0\x82\xd3\xe4\x93\x02#\x12!/v2/{name=projects/*/instances/*}\xda\x41\x04name\x12\xa4\x01\n\rListInstances\x12..google.bigtable.admin.v2.ListInstancesRequest\x1a/.google.bigtable.admin.v2.ListInstancesResponse"2\x82\xd3\xe4\x93\x02#\x12!/v2/{parent=projects/*}/instances\xda\x41\x06parent\x12\x86\x01\n\x0eUpdateInstance\x12".google.bigtable.admin.v2.Instance\x1a".google.bigtable.admin.v2.Instance",\x82\xd3\xe4\x93\x02&\x1a!/v2/{name=projects/*/instances/*}:\x01*\x12\xe8\x01\n\x15PartialUpdateInstance\x12\x36.google.bigtable.admin.v2.PartialUpdateInstanceRequest\x1a\x1d.google.longrunning.Operation"x\x82\xd3\xe4\x93\x02\x36\x32*/v2/{instance.name=projects/*/instances/*}:\x08instance\xda\x41\x14instance,update_mask\xca\x41"\n\x08Instance\x12\x16UpdateInstanceMetadata\x12\x8b\x01\n\x0e\x44\x65leteInstance\x12/.google.bigtable.admin.v2.DeleteInstanceRequest\x1a\x16.google.protobuf.Empty"0\x82\xd3\xe4\x93\x02#*!/v2/{name=projects/*/instances/*}\xda\x41\x04name\x12\xdc\x01\n\rCreateCluster\x12..google.bigtable.admin.v2.CreateClusterRequest\x1a\x1d.google.longrunning.Operation"|\x82\xd3\xe4\x93\x02\x37",/v2/{parent=projects/*/instances/*}/clusters:\x07\x63luster\xda\x41\x19parent,cluster_id,cluster\xca\x41 
\n\x07\x43luster\x12\x15\x43reateClusterMetadata\x12\x99\x01\n\nGetCluster\x12+.google.bigtable.admin.v2.GetClusterRequest\x1a!.google.bigtable.admin.v2.Cluster";\x82\xd3\xe4\x93\x02.\x12,/v2/{name=projects/*/instances/*/clusters/*}\xda\x41\x04name\x12\xac\x01\n\x0cListClusters\x12-.google.bigtable.admin.v2.ListClustersRequest\x1a..google.bigtable.admin.v2.ListClustersResponse"=\x82\xd3\xe4\x93\x02.\x12,/v2/{parent=projects/*/instances/*}/clusters\xda\x41\x06parent\x12\xad\x01\n\rUpdateCluster\x12!.google.bigtable.admin.v2.Cluster\x1a\x1d.google.longrunning.Operation"Z\x82\xd3\xe4\x93\x02\x31\x1a,/v2/{name=projects/*/instances/*/clusters/*}:\x01*\xca\x41 \n\x07\x43luster\x12\x15UpdateClusterMetadata\x12\x94\x01\n\rDeleteCluster\x12..google.bigtable.admin.v2.DeleteClusterRequest\x1a\x16.google.protobuf.Empty";\x82\xd3\xe4\x93\x02.*,/v2/{name=projects/*/instances/*/clusters/*}\xda\x41\x04name\x12\xd5\x01\n\x10\x43reateAppProfile\x12\x31.google.bigtable.admin.v2.CreateAppProfileRequest\x1a$.google.bigtable.admin.v2.AppProfile"h\x82\xd3\xe4\x93\x02>"//v2/{parent=projects/*/instances/*}/appProfiles:\x0b\x61pp_profile\xda\x41!parent,app_profile_id,app_profile\x12\xa5\x01\n\rGetAppProfile\x12..google.bigtable.admin.v2.GetAppProfileRequest\x1a$.google.bigtable.admin.v2.AppProfile">\x82\xd3\xe4\x93\x02\x31\x12//v2/{name=projects/*/instances/*/appProfiles/*}\xda\x41\x04name\x12\xb8\x01\n\x0fListAppProfiles\x12\x30.google.bigtable.admin.v2.ListAppProfilesRequest\x1a\x31.google.bigtable.admin.v2.ListAppProfilesResponse"@\x82\xd3\xe4\x93\x02\x31\x12//v2/{parent=projects/*/instances/*}/appProfiles\xda\x41\x06parent\x12\xfa\x01\n\x10UpdateAppProfile\x12\x31.google.bigtable.admin.v2.UpdateAppProfileRequest\x1a\x1d.google.longrunning.Operation"\x93\x01\x82\xd3\xe4\x93\x02J2;/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}:\x0b\x61pp_profile\xda\x41\x17\x61pp_profile,update_mask\xca\x41&\n\nAppProfile\x12\x18UpdateAppProfileMetadata\x12\x9d\x01\n\x10\x44\x65leteAppProfile\x12\x31.google.bigtable.admin.v2.DeleteAppProfileRequest\x1a\x16.google.protobuf.Empty">\x82\xd3\xe4\x93\x02\x31*//v2/{name=projects/*/instances/*/appProfiles/*}\xda\x41\x04name\x12\x93\x01\n\x0cGetIamPolicy\x12".google.iam.v1.GetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"H\x82\xd3\xe4\x93\x02\x37"2/v2/{resource=projects/*/instances/*}:getIamPolicy:\x01*\xda\x41\x08resource\x12\x9a\x01\n\x0cSetIamPolicy\x12".google.iam.v1.SetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"O\x82\xd3\xe4\x93\x02\x37"2/v2/{resource=projects/*/instances/*}:setIamPolicy:\x01*\xda\x41\x0fresource,policy\x12\xc5\x01\n\x12TestIamPermissions\x12(.google.iam.v1.TestIamPermissionsRequest\x1a).google.iam.v1.TestIamPermissionsResponse"Z\x82\xd3\xe4\x93\x02="8/v2/{resource=projects/*/instances/*}:testIamPermissions:\x01*\xda\x41\x14resource,permissions\x1a\x9a\x03\xca\x41\x1c\x62igtableadmin.googleapis.com\xd2\x41\xf7\x02https://www.googleapis.com/auth/bigtable.admin,https://www.googleapis.com/auth/bigtable.admin.cluster,https://www.googleapis.com/auth/bigtable.admin.instance,https://www.googleapis.com/auth/cloud-bigtable.admin,https://www.googleapis.com/auth/cloud-bigtable.admin.cluster,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-onlyB\xe2\x01\n\x1c\x63om.google.bigtable.admin.v2B\x1a\x42igtableInstanceAdminProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2\xea\x02"Google::Cloud::Bigtable::Admin
::V2b\x06proto3', - dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - google_dot_api_dot_client__pb2.DESCRIPTOR, - google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, - google_dot_api_dot_resource__pb2.DESCRIPTOR, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.DESCRIPTOR, - google_dot_iam_dot_v1_dot_iam__policy__pb2.DESCRIPTOR, - google_dot_iam_dot_v1_dot_policy__pb2.DESCRIPTOR, - google_dot_longrunning_dot_operations__pb2.DESCRIPTOR, - google_dot_protobuf_dot_empty__pb2.DESCRIPTOR, - google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR, - google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - ], -) - - -_CREATEINSTANCEREQUEST_CLUSTERSENTRY = _descriptor.Descriptor( - name="ClustersEntry", - full_name="google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry.value", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=723, - serialized_end=805, -) - -_CREATEINSTANCEREQUEST = _descriptor.Descriptor( - name="CreateInstanceRequest", - full_name="google.bigtable.admin.v2.CreateInstanceRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.bigtable.admin.v2.CreateInstanceRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A-\n+cloudresourcemanager.googleapis.com/Project", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="instance_id", - full_name="google.bigtable.admin.v2.CreateInstanceRequest.instance_id", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="instance", - full_name="google.bigtable.admin.v2.CreateInstanceRequest.instance", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - 
containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="clusters", - full_name="google.bigtable.admin.v2.CreateInstanceRequest.clusters", - index=3, - number=4, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[ - _CREATEINSTANCEREQUEST_CLUSTERSENTRY, - ], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=458, - serialized_end=805, -) - - -_GETINSTANCEREQUEST = _descriptor.Descriptor( - name="GetInstanceRequest", - full_name="google.bigtable.admin.v2.GetInstanceRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.GetInstanceRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Instance', - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=807, - serialized_end=883, -) - - -_LISTINSTANCESREQUEST = _descriptor.Descriptor( - name="ListInstancesRequest", - full_name="google.bigtable.admin.v2.ListInstancesRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.bigtable.admin.v2.ListInstancesRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A-\n+cloudresourcemanager.googleapis.com/Project", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.bigtable.admin.v2.ListInstancesRequest.page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=885, - serialized_end=996, -) - - -_LISTINSTANCESRESPONSE = _descriptor.Descriptor( - name="ListInstancesResponse", - full_name="google.bigtable.admin.v2.ListInstancesResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - 
_descriptor.FieldDescriptor( - name="instances", - full_name="google.bigtable.admin.v2.ListInstancesResponse.instances", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="failed_locations", - full_name="google.bigtable.admin.v2.ListInstancesResponse.failed_locations", - index=1, - number=2, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.bigtable.admin.v2.ListInstancesResponse.next_page_token", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=999, - serialized_end=1128, -) - - -_PARTIALUPDATEINSTANCEREQUEST = _descriptor.Descriptor( - name="PartialUpdateInstanceRequest", - full_name="google.bigtable.admin.v2.PartialUpdateInstanceRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="instance", - full_name="google.bigtable.admin.v2.PartialUpdateInstanceRequest.instance", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="update_mask", - full_name="google.bigtable.admin.v2.PartialUpdateInstanceRequest.update_mask", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1131, - serialized_end=1274, -) - - -_DELETEINSTANCEREQUEST = _descriptor.Descriptor( - name="DeleteInstanceRequest", - full_name="google.bigtable.admin.v2.DeleteInstanceRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.DeleteInstanceRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - 
is_extension=False, - extension_scope=None, - serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Instance', - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1276, - serialized_end=1355, -) - - -_CREATECLUSTERREQUEST = _descriptor.Descriptor( - name="CreateClusterRequest", - full_name="google.bigtable.admin.v2.CreateClusterRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.bigtable.admin.v2.CreateClusterRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Instance', - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cluster_id", - full_name="google.bigtable.admin.v2.CreateClusterRequest.cluster_id", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cluster", - full_name="google.bigtable.admin.v2.CreateClusterRequest.cluster", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1358, - serialized_end=1520, -) - - -_GETCLUSTERREQUEST = _descriptor.Descriptor( - name="GetClusterRequest", - full_name="google.bigtable.admin.v2.GetClusterRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.GetClusterRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A!\n\037bigtable.googleapis.com/Cluster", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1522, - serialized_end=1596, -) - - -_LISTCLUSTERSREQUEST = _descriptor.Descriptor( - name="ListClustersRequest", - full_name="google.bigtable.admin.v2.ListClustersRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - 
full_name="google.bigtable.admin.v2.ListClustersRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Instance', - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.bigtable.admin.v2.ListClustersRequest.page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1598, - serialized_end=1697, -) - - -_LISTCLUSTERSRESPONSE = _descriptor.Descriptor( - name="ListClustersResponse", - full_name="google.bigtable.admin.v2.ListClustersResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="clusters", - full_name="google.bigtable.admin.v2.ListClustersResponse.clusters", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="failed_locations", - full_name="google.bigtable.admin.v2.ListClustersResponse.failed_locations", - index=1, - number=2, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.bigtable.admin.v2.ListClustersResponse.next_page_token", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1699, - serialized_end=1825, -) - - -_DELETECLUSTERREQUEST = _descriptor.Descriptor( - name="DeleteClusterRequest", - full_name="google.bigtable.admin.v2.DeleteClusterRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.DeleteClusterRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - 
serialized_options=b"\340A\002\372A!\n\037bigtable.googleapis.com/Cluster", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1827, - serialized_end=1904, -) - - -_CREATEINSTANCEMETADATA = _descriptor.Descriptor( - name="CreateInstanceMetadata", - full_name="google.bigtable.admin.v2.CreateInstanceMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="original_request", - full_name="google.bigtable.admin.v2.CreateInstanceMetadata.original_request", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="request_time", - full_name="google.bigtable.admin.v2.CreateInstanceMetadata.request_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="finish_time", - full_name="google.bigtable.admin.v2.CreateInstanceMetadata.finish_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1907, - serialized_end=2105, -) - - -_UPDATEINSTANCEMETADATA = _descriptor.Descriptor( - name="UpdateInstanceMetadata", - full_name="google.bigtable.admin.v2.UpdateInstanceMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="original_request", - full_name="google.bigtable.admin.v2.UpdateInstanceMetadata.original_request", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="request_time", - full_name="google.bigtable.admin.v2.UpdateInstanceMetadata.request_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="finish_time", - full_name="google.bigtable.admin.v2.UpdateInstanceMetadata.finish_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - 
has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2108, - serialized_end=2313, -) - - -_CREATECLUSTERMETADATA = _descriptor.Descriptor( - name="CreateClusterMetadata", - full_name="google.bigtable.admin.v2.CreateClusterMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="original_request", - full_name="google.bigtable.admin.v2.CreateClusterMetadata.original_request", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="request_time", - full_name="google.bigtable.admin.v2.CreateClusterMetadata.request_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="finish_time", - full_name="google.bigtable.admin.v2.CreateClusterMetadata.finish_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2316, - serialized_end=2512, -) - - -_UPDATECLUSTERMETADATA = _descriptor.Descriptor( - name="UpdateClusterMetadata", - full_name="google.bigtable.admin.v2.UpdateClusterMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="original_request", - full_name="google.bigtable.admin.v2.UpdateClusterMetadata.original_request", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="request_time", - full_name="google.bigtable.admin.v2.UpdateClusterMetadata.request_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="finish_time", - 
full_name="google.bigtable.admin.v2.UpdateClusterMetadata.finish_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2515, - serialized_end=2698, -) - - -_CREATEAPPPROFILEREQUEST = _descriptor.Descriptor( - name="CreateAppProfileRequest", - full_name="google.bigtable.admin.v2.CreateAppProfileRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.bigtable.admin.v2.CreateAppProfileRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Instance', - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="app_profile_id", - full_name="google.bigtable.admin.v2.CreateAppProfileRequest.app_profile_id", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="app_profile", - full_name="google.bigtable.admin.v2.CreateAppProfileRequest.app_profile", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="ignore_warnings", - full_name="google.bigtable.admin.v2.CreateAppProfileRequest.ignore_warnings", - index=3, - number=4, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2701, - serialized_end=2902, -) - - -_GETAPPPROFILEREQUEST = _descriptor.Descriptor( - name="GetAppProfileRequest", - full_name="google.bigtable.admin.v2.GetAppProfileRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.GetAppProfileRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - 
extension_scope=None, - serialized_options=b'\340A\002\372A$\n"bigtable.googleapis.com/AppProfile', - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2904, - serialized_end=2984, -) - - -_LISTAPPPROFILESREQUEST = _descriptor.Descriptor( - name="ListAppProfilesRequest", - full_name="google.bigtable.admin.v2.ListAppProfilesRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.bigtable.admin.v2.ListAppProfilesRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Instance', - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.bigtable.admin.v2.ListAppProfilesRequest.page_size", - index=1, - number=3, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.bigtable.admin.v2.ListAppProfilesRequest.page_token", - index=2, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2986, - serialized_end=3107, -) - - -_LISTAPPPROFILESRESPONSE = _descriptor.Descriptor( - name="ListAppProfilesResponse", - full_name="google.bigtable.admin.v2.ListAppProfilesResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="app_profiles", - full_name="google.bigtable.admin.v2.ListAppProfilesResponse.app_profiles", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.bigtable.admin.v2.ListAppProfilesResponse.next_page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="failed_locations", - 
full_name="google.bigtable.admin.v2.ListAppProfilesResponse.failed_locations", - index=2, - number=3, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3110, - serialized_end=3246, -) - - -_UPDATEAPPPROFILEREQUEST = _descriptor.Descriptor( - name="UpdateAppProfileRequest", - full_name="google.bigtable.admin.v2.UpdateAppProfileRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="app_profile", - full_name="google.bigtable.admin.v2.UpdateAppProfileRequest.app_profile", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="update_mask", - full_name="google.bigtable.admin.v2.UpdateAppProfileRequest.update_mask", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="ignore_warnings", - full_name="google.bigtable.admin.v2.UpdateAppProfileRequest.ignore_warnings", - index=2, - number=3, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3249, - serialized_end=3417, -) - - -_DELETEAPPPROFILEREQUEST = _descriptor.Descriptor( - name="DeleteAppProfileRequest", - full_name="google.bigtable.admin.v2.DeleteAppProfileRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.DeleteAppProfileRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b'\340A\002\372A$\n"bigtable.googleapis.com/AppProfile', - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="ignore_warnings", - full_name="google.bigtable.admin.v2.DeleteAppProfileRequest.ignore_warnings", - index=1, - number=2, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - 
extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3419, - serialized_end=3527, -) - - -_UPDATEAPPPROFILEMETADATA = _descriptor.Descriptor( - name="UpdateAppProfileMetadata", - full_name="google.bigtable.admin.v2.UpdateAppProfileMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3529, - serialized_end=3555, -) - -_CREATEINSTANCEREQUEST_CLUSTERSENTRY.fields_by_name[ - "value" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._CLUSTER -) -_CREATEINSTANCEREQUEST_CLUSTERSENTRY.containing_type = _CREATEINSTANCEREQUEST -_CREATEINSTANCEREQUEST.fields_by_name[ - "instance" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._INSTANCE -) -_CREATEINSTANCEREQUEST.fields_by_name[ - "clusters" -].message_type = _CREATEINSTANCEREQUEST_CLUSTERSENTRY -_LISTINSTANCESRESPONSE.fields_by_name[ - "instances" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._INSTANCE -) -_PARTIALUPDATEINSTANCEREQUEST.fields_by_name[ - "instance" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._INSTANCE -) -_PARTIALUPDATEINSTANCEREQUEST.fields_by_name[ - "update_mask" -].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK -_CREATECLUSTERREQUEST.fields_by_name[ - "cluster" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._CLUSTER -) -_LISTCLUSTERSRESPONSE.fields_by_name[ - "clusters" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._CLUSTER -) -_CREATEINSTANCEMETADATA.fields_by_name[ - "original_request" -].message_type = _CREATEINSTANCEREQUEST -_CREATEINSTANCEMETADATA.fields_by_name[ - "request_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CREATEINSTANCEMETADATA.fields_by_name[ - "finish_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_UPDATEINSTANCEMETADATA.fields_by_name[ - "original_request" -].message_type = _PARTIALUPDATEINSTANCEREQUEST -_UPDATEINSTANCEMETADATA.fields_by_name[ - "request_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_UPDATEINSTANCEMETADATA.fields_by_name[ - "finish_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CREATECLUSTERMETADATA.fields_by_name[ - "original_request" -].message_type = _CREATECLUSTERREQUEST -_CREATECLUSTERMETADATA.fields_by_name[ - "request_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CREATECLUSTERMETADATA.fields_by_name[ - "finish_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_UPDATECLUSTERMETADATA.fields_by_name[ - "original_request" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._CLUSTER -) -_UPDATECLUSTERMETADATA.fields_by_name[ - "request_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_UPDATECLUSTERMETADATA.fields_by_name[ - "finish_time" -].message_type = 
google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CREATEAPPPROFILEREQUEST.fields_by_name[ - "app_profile" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._APPPROFILE -) -_LISTAPPPROFILESRESPONSE.fields_by_name[ - "app_profiles" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._APPPROFILE -) -_UPDATEAPPPROFILEREQUEST.fields_by_name[ - "app_profile" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._APPPROFILE -) -_UPDATEAPPPROFILEREQUEST.fields_by_name[ - "update_mask" -].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK -DESCRIPTOR.message_types_by_name["CreateInstanceRequest"] = _CREATEINSTANCEREQUEST -DESCRIPTOR.message_types_by_name["GetInstanceRequest"] = _GETINSTANCEREQUEST -DESCRIPTOR.message_types_by_name["ListInstancesRequest"] = _LISTINSTANCESREQUEST -DESCRIPTOR.message_types_by_name["ListInstancesResponse"] = _LISTINSTANCESRESPONSE -DESCRIPTOR.message_types_by_name[ - "PartialUpdateInstanceRequest" -] = _PARTIALUPDATEINSTANCEREQUEST -DESCRIPTOR.message_types_by_name["DeleteInstanceRequest"] = _DELETEINSTANCEREQUEST -DESCRIPTOR.message_types_by_name["CreateClusterRequest"] = _CREATECLUSTERREQUEST -DESCRIPTOR.message_types_by_name["GetClusterRequest"] = _GETCLUSTERREQUEST -DESCRIPTOR.message_types_by_name["ListClustersRequest"] = _LISTCLUSTERSREQUEST -DESCRIPTOR.message_types_by_name["ListClustersResponse"] = _LISTCLUSTERSRESPONSE -DESCRIPTOR.message_types_by_name["DeleteClusterRequest"] = _DELETECLUSTERREQUEST -DESCRIPTOR.message_types_by_name["CreateInstanceMetadata"] = _CREATEINSTANCEMETADATA -DESCRIPTOR.message_types_by_name["UpdateInstanceMetadata"] = _UPDATEINSTANCEMETADATA -DESCRIPTOR.message_types_by_name["CreateClusterMetadata"] = _CREATECLUSTERMETADATA -DESCRIPTOR.message_types_by_name["UpdateClusterMetadata"] = _UPDATECLUSTERMETADATA -DESCRIPTOR.message_types_by_name["CreateAppProfileRequest"] = _CREATEAPPPROFILEREQUEST -DESCRIPTOR.message_types_by_name["GetAppProfileRequest"] = _GETAPPPROFILEREQUEST -DESCRIPTOR.message_types_by_name["ListAppProfilesRequest"] = _LISTAPPPROFILESREQUEST -DESCRIPTOR.message_types_by_name["ListAppProfilesResponse"] = _LISTAPPPROFILESRESPONSE -DESCRIPTOR.message_types_by_name["UpdateAppProfileRequest"] = _UPDATEAPPPROFILEREQUEST -DESCRIPTOR.message_types_by_name["DeleteAppProfileRequest"] = _DELETEAPPPROFILEREQUEST -DESCRIPTOR.message_types_by_name["UpdateAppProfileMetadata"] = _UPDATEAPPPROFILEMETADATA -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -CreateInstanceRequest = _reflection.GeneratedProtocolMessageType( - "CreateInstanceRequest", - (_message.Message,), - { - "ClustersEntry": _reflection.GeneratedProtocolMessageType( - "ClustersEntry", - (_message.Message,), - { - "DESCRIPTOR": _CREATEINSTANCEREQUEST_CLUSTERSENTRY, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2" - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry) - }, - ), - "DESCRIPTOR": _CREATEINSTANCEREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Request message for BigtableInstanceAdmin.CreateInstance. - - Attributes: - parent: - Required. The unique name of the project in which to create - the new instance. Values are of the form - ``projects/{project}``. - instance_id: - Required. 
The ID to be used when referring to the new instance - within its project, e.g., just ``myinstance`` rather than - ``projects/myproject/instances/myinstance``. - instance: - Required. The instance to create. Fields marked ``OutputOnly`` - must be left blank. - clusters: - Required. The clusters to be created within the instance, - mapped by desired cluster ID, e.g., just ``mycluster`` rather - than ``projects/myproject/instances/myinstance/clusters/myclus - ter``. Fields marked ``OutputOnly`` must be left blank. - Currently, at most four clusters can be specified. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateInstanceRequest) - }, -) -_sym_db.RegisterMessage(CreateInstanceRequest) -_sym_db.RegisterMessage(CreateInstanceRequest.ClustersEntry) - -GetInstanceRequest = _reflection.GeneratedProtocolMessageType( - "GetInstanceRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETINSTANCEREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Request message for BigtableInstanceAdmin.GetInstance. - - Attributes: - name: - Required. The unique name of the requested instance. Values - are of the form ``projects/{project}/instances/{instance}``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetInstanceRequest) - }, -) -_sym_db.RegisterMessage(GetInstanceRequest) - -ListInstancesRequest = _reflection.GeneratedProtocolMessageType( - "ListInstancesRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTINSTANCESREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Request message for BigtableInstanceAdmin.ListInstances. - - Attributes: - parent: - Required. The unique name of the project for which a list of - instances is requested. Values are of the form - ``projects/{project}``. - page_token: - DEPRECATED: This field is unused and ignored. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListInstancesRequest) - }, -) -_sym_db.RegisterMessage(ListInstancesRequest) - -ListInstancesResponse = _reflection.GeneratedProtocolMessageType( - "ListInstancesResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTINSTANCESRESPONSE, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Response message for BigtableInstanceAdmin.ListInstances. - - Attributes: - instances: - The list of requested instances. - failed_locations: - Locations from which Instance information could not be - retrieved, due to an outage or some other transient condition. - Instances whose Clusters are all in one of the failed - locations may be missing from ``instances``, and Instances - with at least one Cluster in a failed location may only have - partial information returned. Values are of the form - ``projects//locations/`` - next_page_token: - DEPRECATED: This field is unused and ignored. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListInstancesResponse) - }, -) -_sym_db.RegisterMessage(ListInstancesResponse) - -PartialUpdateInstanceRequest = _reflection.GeneratedProtocolMessageType( - "PartialUpdateInstanceRequest", - (_message.Message,), - { - "DESCRIPTOR": _PARTIALUPDATEINSTANCEREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Request message for BigtableInstanceAdmin.PartialUpdateInstance. - - Attributes: - instance: - Required. The Instance which will (partially) replace the - current value. 
- update_mask: - Required. The subset of Instance fields which should be - replaced. Must be explicitly set. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.PartialUpdateInstanceRequest) - }, -) -_sym_db.RegisterMessage(PartialUpdateInstanceRequest) - -DeleteInstanceRequest = _reflection.GeneratedProtocolMessageType( - "DeleteInstanceRequest", - (_message.Message,), - { - "DESCRIPTOR": _DELETEINSTANCEREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Request message for BigtableInstanceAdmin.DeleteInstance. - - Attributes: - name: - Required. The unique name of the instance to be deleted. - Values are of the form - ``projects/{project}/instances/{instance}``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteInstanceRequest) - }, -) -_sym_db.RegisterMessage(DeleteInstanceRequest) - -CreateClusterRequest = _reflection.GeneratedProtocolMessageType( - "CreateClusterRequest", - (_message.Message,), - { - "DESCRIPTOR": _CREATECLUSTERREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Request message for BigtableInstanceAdmin.CreateCluster. - - Attributes: - parent: - Required. The unique name of the instance in which to create - the new cluster. Values are of the form - ``projects/{project}/instances/{instance}``. - cluster_id: - Required. The ID to be used when referring to the new cluster - within its instance, e.g., just ``mycluster`` rather than ``pr - ojects/myproject/instances/myinstance/clusters/mycluster``. - cluster: - Required. The cluster to be created. Fields marked - ``OutputOnly`` must be left blank. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateClusterRequest) - }, -) -_sym_db.RegisterMessage(CreateClusterRequest) - -GetClusterRequest = _reflection.GeneratedProtocolMessageType( - "GetClusterRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETCLUSTERREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Request message for BigtableInstanceAdmin.GetCluster. - - Attributes: - name: - Required. The unique name of the requested cluster. Values are - of the form ``projects/{project}/instances/{instance}/clusters - /{cluster}``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetClusterRequest) - }, -) -_sym_db.RegisterMessage(GetClusterRequest) - -ListClustersRequest = _reflection.GeneratedProtocolMessageType( - "ListClustersRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTCLUSTERSREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Request message for BigtableInstanceAdmin.ListClusters. - - Attributes: - parent: - Required. The unique name of the instance for which a list of - clusters is requested. Values are of the form - ``projects/{project}/instances/{instance}``. Use ``{instance} - = '-'`` to list Clusters for all Instances in a project, e.g., - ``projects/myproject/instances/-``. - page_token: - DEPRECATED: This field is unused and ignored. 
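
An illustrative aside (not part of the patch): the ListClustersRequest contract documented above, including the ``{instance} = '-'`` wildcard, was typically exercised against these now-deleted pb2 types roughly as follows. The project name is hypothetical, and ``stub`` is a BigtableInstanceAdminStub (see the construction sketch at the end of this section).

    from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2

    # List clusters for every instance in the project by using the
    # '-' wildcard in place of a concrete instance ID.
    request = bigtable_instance_admin_pb2.ListClustersRequest(
        parent="projects/my-project/instances/-",  # hypothetical project
    )
    response = stub.ListClusters(request)  # stub: BigtableInstanceAdminStub
    for cluster in response.clusters:
        print(cluster.name)
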
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListClustersRequest) - }, -) -_sym_db.RegisterMessage(ListClustersRequest) - -ListClustersResponse = _reflection.GeneratedProtocolMessageType( - "ListClustersResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTCLUSTERSRESPONSE, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Response message for BigtableInstanceAdmin.ListClusters. - - Attributes: - clusters: - The list of requested clusters. - failed_locations: - Locations from which Cluster information could not be - retrieved, due to an outage or some other transient condition. - Clusters from these locations may be missing from - ``clusters``, or may only have partial information returned. - Values are of the form - ``projects//locations/`` - next_page_token: - DEPRECATED: This field is unused and ignored. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListClustersResponse) - }, -) -_sym_db.RegisterMessage(ListClustersResponse) - -DeleteClusterRequest = _reflection.GeneratedProtocolMessageType( - "DeleteClusterRequest", - (_message.Message,), - { - "DESCRIPTOR": _DELETECLUSTERREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Request message for BigtableInstanceAdmin.DeleteCluster. - - Attributes: - name: - Required. The unique name of the cluster to be deleted. Values - are of the form ``projects/{project}/instances/{instance}/clus - ters/{cluster}``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteClusterRequest) - }, -) -_sym_db.RegisterMessage(DeleteClusterRequest) - -CreateInstanceMetadata = _reflection.GeneratedProtocolMessageType( - "CreateInstanceMetadata", - (_message.Message,), - { - "DESCRIPTOR": _CREATEINSTANCEMETADATA, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """The metadata for the Operation returned by CreateInstance. - - Attributes: - original_request: - The request that prompted the initiation of this - CreateInstance operation. - request_time: - The time at which the original request was received. - finish_time: - The time at which the operation failed or was completed - successfully. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateInstanceMetadata) - }, -) -_sym_db.RegisterMessage(CreateInstanceMetadata) - -UpdateInstanceMetadata = _reflection.GeneratedProtocolMessageType( - "UpdateInstanceMetadata", - (_message.Message,), - { - "DESCRIPTOR": _UPDATEINSTANCEMETADATA, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """The metadata for the Operation returned by UpdateInstance. - - Attributes: - original_request: - The request that prompted the initiation of this - UpdateInstance operation. - request_time: - The time at which the original request was received. - finish_time: - The time at which the operation failed or was completed - successfully. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.UpdateInstanceMetadata) - }, -) -_sym_db.RegisterMessage(UpdateInstanceMetadata) - -CreateClusterMetadata = _reflection.GeneratedProtocolMessageType( - "CreateClusterMetadata", - (_message.Message,), - { - "DESCRIPTOR": _CREATECLUSTERMETADATA, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """The metadata for the Operation returned by CreateCluster. 
- - Attributes: - original_request: - The request that prompted the initiation of this CreateCluster - operation. - request_time: - The time at which the original request was received. - finish_time: - The time at which the operation failed or was completed - successfully. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateClusterMetadata) - }, -) -_sym_db.RegisterMessage(CreateClusterMetadata) - -UpdateClusterMetadata = _reflection.GeneratedProtocolMessageType( - "UpdateClusterMetadata", - (_message.Message,), - { - "DESCRIPTOR": _UPDATECLUSTERMETADATA, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """The metadata for the Operation returned by UpdateCluster. - - Attributes: - original_request: - The request that prompted the initiation of this UpdateCluster - operation. - request_time: - The time at which the original request was received. - finish_time: - The time at which the operation failed or was completed - successfully. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.UpdateClusterMetadata) - }, -) -_sym_db.RegisterMessage(UpdateClusterMetadata) - -CreateAppProfileRequest = _reflection.GeneratedProtocolMessageType( - "CreateAppProfileRequest", - (_message.Message,), - { - "DESCRIPTOR": _CREATEAPPPROFILEREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Request message for BigtableInstanceAdmin.CreateAppProfile. - - Attributes: - parent: - Required. The unique name of the instance in which to create - the new app profile. Values are of the form - ``projects/{project}/instances/{instance}``. - app_profile_id: - Required. The ID to be used when referring to the new app - profile within its instance, e.g., just ``myprofile`` rather - than ``projects/myproject/instances/myinstance/appProfiles/myp - rofile``. - app_profile: - Required. The app profile to be created. Fields marked - ``OutputOnly`` will be ignored. - ignore_warnings: - If true, ignore safety checks when creating the app profile. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateAppProfileRequest) - }, -) -_sym_db.RegisterMessage(CreateAppProfileRequest) - -GetAppProfileRequest = _reflection.GeneratedProtocolMessageType( - "GetAppProfileRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETAPPPROFILEREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Request message for BigtableInstanceAdmin.GetAppProfile. - - Attributes: - name: - Required. The unique name of the requested app profile. Values - are of the form ``projects/{project}/instances/{instance}/appP - rofiles/{app_profile}``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetAppProfileRequest) - }, -) -_sym_db.RegisterMessage(GetAppProfileRequest) - -ListAppProfilesRequest = _reflection.GeneratedProtocolMessageType( - "ListAppProfilesRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTAPPPROFILESREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Request message for BigtableInstanceAdmin.ListAppProfiles. - - Attributes: - parent: - Required. The unique name of the instance for which a list of - app profiles is requested. Values are of the form - ``projects/{project}/instances/{instance}``. Use ``{instance} - = '-'`` to list AppProfiles for all Instances in a project, - e.g., ``projects/myproject/instances/-``. 
- page_size: - Maximum number of results per page. A page_size of zero lets - the server choose the number of items to return. A page_size - which is strictly positive will return at most that many - items. A negative page_size will cause an error. Following - the first request, subsequent paginated calls are not required - to pass a page_size. If a page_size is set in subsequent - calls, it must match the page_size given in the first request. - page_token: - The value of ``next_page_token`` returned by a previous call. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListAppProfilesRequest) - }, -) -_sym_db.RegisterMessage(ListAppProfilesRequest) - -ListAppProfilesResponse = _reflection.GeneratedProtocolMessageType( - "ListAppProfilesResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTAPPPROFILESRESPONSE, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Response message for BigtableInstanceAdmin.ListAppProfiles. - - Attributes: - app_profiles: - The list of requested app profiles. - next_page_token: - Set if not all app profiles could be returned in a single - response. Pass this value to ``page_token`` in another request - to get the next page of results. - failed_locations: - Locations from which AppProfile information could not be - retrieved, due to an outage or some other transient condition. - AppProfiles from these locations may be missing from - ``app_profiles``. Values are of the form - ``projects//locations/`` - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListAppProfilesResponse) - }, -) -_sym_db.RegisterMessage(ListAppProfilesResponse) - -UpdateAppProfileRequest = _reflection.GeneratedProtocolMessageType( - "UpdateAppProfileRequest", - (_message.Message,), - { - "DESCRIPTOR": _UPDATEAPPPROFILEREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Request message for BigtableInstanceAdmin.UpdateAppProfile. - - Attributes: - app_profile: - Required. The app profile which will (partially) replace the - current value. - update_mask: - Required. The subset of app profile fields which should be - replaced. If unset, all fields will be replaced. - ignore_warnings: - If true, ignore safety checks when updating the app profile. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.UpdateAppProfileRequest) - }, -) -_sym_db.RegisterMessage(UpdateAppProfileRequest) - -DeleteAppProfileRequest = _reflection.GeneratedProtocolMessageType( - "DeleteAppProfileRequest", - (_message.Message,), - { - "DESCRIPTOR": _DELETEAPPPROFILEREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Request message for BigtableInstanceAdmin.DeleteAppProfile. - - Attributes: - name: - Required. The unique name of the app profile to be deleted. - Values are of the form ``projects/{project}/instances/{instanc - e}/appProfiles/{app_profile}``. - ignore_warnings: - If true, ignore safety checks when deleting the app profile. 
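
Another illustrative aside (not part of the patch): the ``page_size``/``page_token`` contract documented above amounts to a manual paging loop. This sketch assumes a ``stub`` as constructed at the end of this section and a hypothetical ``handle()`` callback.

    request = bigtable_instance_admin_pb2.ListAppProfilesRequest(
        parent="projects/my-project/instances/my-instance",  # hypothetical
        page_size=100,  # zero would let the server choose the page size
    )
    while True:
        response = stub.ListAppProfiles(request)
        for app_profile in response.app_profiles:
            handle(app_profile)  # hypothetical per-item callback
        if not response.next_page_token:
            break
        # Reusing the same request object keeps page_size constant, as the
        # docstring above requires for subsequent paginated calls.
        request.page_token = response.next_page_token
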
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteAppProfileRequest) - }, -) -_sym_db.RegisterMessage(DeleteAppProfileRequest) - -UpdateAppProfileMetadata = _reflection.GeneratedProtocolMessageType( - "UpdateAppProfileMetadata", - (_message.Message,), - { - "DESCRIPTOR": _UPDATEAPPPROFILEMETADATA, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """The metadata for the Operation returned by UpdateAppProfile.""", - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.UpdateAppProfileMetadata) - }, -) -_sym_db.RegisterMessage(UpdateAppProfileMetadata) - - -DESCRIPTOR._options = None -_CREATEINSTANCEREQUEST_CLUSTERSENTRY._options = None -_CREATEINSTANCEREQUEST.fields_by_name["parent"]._options = None -_CREATEINSTANCEREQUEST.fields_by_name["instance_id"]._options = None -_CREATEINSTANCEREQUEST.fields_by_name["instance"]._options = None -_CREATEINSTANCEREQUEST.fields_by_name["clusters"]._options = None -_GETINSTANCEREQUEST.fields_by_name["name"]._options = None -_LISTINSTANCESREQUEST.fields_by_name["parent"]._options = None -_PARTIALUPDATEINSTANCEREQUEST.fields_by_name["instance"]._options = None -_PARTIALUPDATEINSTANCEREQUEST.fields_by_name["update_mask"]._options = None -_DELETEINSTANCEREQUEST.fields_by_name["name"]._options = None -_CREATECLUSTERREQUEST.fields_by_name["parent"]._options = None -_CREATECLUSTERREQUEST.fields_by_name["cluster_id"]._options = None -_CREATECLUSTERREQUEST.fields_by_name["cluster"]._options = None -_GETCLUSTERREQUEST.fields_by_name["name"]._options = None -_LISTCLUSTERSREQUEST.fields_by_name["parent"]._options = None -_DELETECLUSTERREQUEST.fields_by_name["name"]._options = None -_CREATEAPPPROFILEREQUEST.fields_by_name["parent"]._options = None -_CREATEAPPPROFILEREQUEST.fields_by_name["app_profile_id"]._options = None -_CREATEAPPPROFILEREQUEST.fields_by_name["app_profile"]._options = None -_GETAPPPROFILEREQUEST.fields_by_name["name"]._options = None -_LISTAPPPROFILESREQUEST.fields_by_name["parent"]._options = None -_UPDATEAPPPROFILEREQUEST.fields_by_name["app_profile"]._options = None -_UPDATEAPPPROFILEREQUEST.fields_by_name["update_mask"]._options = None -_DELETEAPPPROFILEREQUEST.fields_by_name["name"]._options = None - -_BIGTABLEINSTANCEADMIN = _descriptor.ServiceDescriptor( - name="BigtableInstanceAdmin", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin", - file=DESCRIPTOR, - index=0, - serialized_options=b"\312A\034bigtableadmin.googleapis.com\322A\367\002https://www.googleapis.com/auth/bigtable.admin,https://www.googleapis.com/auth/bigtable.admin.cluster,https://www.googleapis.com/auth/bigtable.admin.instance,https://www.googleapis.com/auth/cloud-bigtable.admin,https://www.googleapis.com/auth/cloud-bigtable.admin.cluster,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-only", - create_key=_descriptor._internal_create_key, - serialized_start=3558, - serialized_end=7416, - methods=[ - _descriptor.MethodDescriptor( - name="CreateInstance", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.CreateInstance", - index=0, - containing_service=None, - input_type=_CREATEINSTANCEREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b'\202\323\344\223\002&"!/v2/{parent=projects/*}/instances:\001*\332A$parent,instance_id,instance,clusters\312A"\n\010Instance\022\026CreateInstanceMetadata', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - 
name="GetInstance", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.GetInstance", - index=1, - containing_service=None, - input_type=_GETINSTANCEREQUEST, - output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._INSTANCE, - serialized_options=b"\202\323\344\223\002#\022!/v2/{name=projects/*/instances/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="ListInstances", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.ListInstances", - index=2, - containing_service=None, - input_type=_LISTINSTANCESREQUEST, - output_type=_LISTINSTANCESRESPONSE, - serialized_options=b"\202\323\344\223\002#\022!/v2/{parent=projects/*}/instances\332A\006parent", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="UpdateInstance", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateInstance", - index=3, - containing_service=None, - input_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._INSTANCE, - output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._INSTANCE, - serialized_options=b"\202\323\344\223\002&\032!/v2/{name=projects/*/instances/*}:\001*", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="PartialUpdateInstance", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.PartialUpdateInstance", - index=4, - containing_service=None, - input_type=_PARTIALUPDATEINSTANCEREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b'\202\323\344\223\00262*/v2/{instance.name=projects/*/instances/*}:\010instance\332A\024instance,update_mask\312A"\n\010Instance\022\026UpdateInstanceMetadata', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="DeleteInstance", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteInstance", - index=5, - containing_service=None, - input_type=_DELETEINSTANCEREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=b"\202\323\344\223\002#*!/v2/{name=projects/*/instances/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="CreateCluster", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.CreateCluster", - index=6, - containing_service=None, - input_type=_CREATECLUSTERREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b'\202\323\344\223\0027",/v2/{parent=projects/*/instances/*}/clusters:\007cluster\332A\031parent,cluster_id,cluster\312A \n\007Cluster\022\025CreateClusterMetadata', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="GetCluster", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.GetCluster", - index=7, - containing_service=None, - input_type=_GETCLUSTERREQUEST, - output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._CLUSTER, - serialized_options=b"\202\323\344\223\002.\022,/v2/{name=projects/*/instances/*/clusters/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="ListClusters", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.ListClusters", - index=8, - containing_service=None, - input_type=_LISTCLUSTERSREQUEST, - output_type=_LISTCLUSTERSRESPONSE, - 
serialized_options=b"\202\323\344\223\002.\022,/v2/{parent=projects/*/instances/*}/clusters\332A\006parent", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="UpdateCluster", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateCluster", - index=9, - containing_service=None, - input_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._CLUSTER, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b"\202\323\344\223\0021\032,/v2/{name=projects/*/instances/*/clusters/*}:\001*\312A \n\007Cluster\022\025UpdateClusterMetadata", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="DeleteCluster", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteCluster", - index=10, - containing_service=None, - input_type=_DELETECLUSTERREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=b"\202\323\344\223\002.*,/v2/{name=projects/*/instances/*/clusters/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="CreateAppProfile", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.CreateAppProfile", - index=11, - containing_service=None, - input_type=_CREATEAPPPROFILEREQUEST, - output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._APPPROFILE, - serialized_options=b'\202\323\344\223\002>"//v2/{parent=projects/*/instances/*}/appProfiles:\013app_profile\332A!parent,app_profile_id,app_profile', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="GetAppProfile", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.GetAppProfile", - index=12, - containing_service=None, - input_type=_GETAPPPROFILEREQUEST, - output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._APPPROFILE, - serialized_options=b"\202\323\344\223\0021\022//v2/{name=projects/*/instances/*/appProfiles/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="ListAppProfiles", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.ListAppProfiles", - index=13, - containing_service=None, - input_type=_LISTAPPPROFILESREQUEST, - output_type=_LISTAPPPROFILESRESPONSE, - serialized_options=b"\202\323\344\223\0021\022//v2/{parent=projects/*/instances/*}/appProfiles\332A\006parent", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="UpdateAppProfile", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateAppProfile", - index=14, - containing_service=None, - input_type=_UPDATEAPPPROFILEREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b"\202\323\344\223\002J2;/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}:\013app_profile\332A\027app_profile,update_mask\312A&\n\nAppProfile\022\030UpdateAppProfileMetadata", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="DeleteAppProfile", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteAppProfile", - index=15, - containing_service=None, - input_type=_DELETEAPPPROFILEREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=b"\202\323\344\223\0021*//v2/{name=projects/*/instances/*/appProfiles/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="GetIamPolicy", - 
full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.GetIamPolicy", - index=16, - containing_service=None, - input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._GETIAMPOLICYREQUEST, - output_type=google_dot_iam_dot_v1_dot_policy__pb2._POLICY, - serialized_options=b'\202\323\344\223\0027"2/v2/{resource=projects/*/instances/*}:getIamPolicy:\001*\332A\010resource', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="SetIamPolicy", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.SetIamPolicy", - index=17, - containing_service=None, - input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._SETIAMPOLICYREQUEST, - output_type=google_dot_iam_dot_v1_dot_policy__pb2._POLICY, - serialized_options=b'\202\323\344\223\0027"2/v2/{resource=projects/*/instances/*}:setIamPolicy:\001*\332A\017resource,policy', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="TestIamPermissions", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.TestIamPermissions", - index=18, - containing_service=None, - input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._TESTIAMPERMISSIONSREQUEST, - output_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._TESTIAMPERMISSIONSRESPONSE, - serialized_options=b'\202\323\344\223\002="8/v2/{resource=projects/*/instances/*}:testIamPermissions:\001*\332A\024resource,permissions', - create_key=_descriptor._internal_create_key, - ), - ], -) -_sym_db.RegisterServiceDescriptor(_BIGTABLEINSTANCEADMIN) - -DESCRIPTOR.services_by_name["BigtableInstanceAdmin"] = _BIGTABLEINSTANCEADMIN - -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py b/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py deleted file mode 100644 index 0337e5d4f..000000000 --- a/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py +++ /dev/null @@ -1,880 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc - -from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2, -) -from google.cloud.bigtable_admin_v2.proto import ( - instance_pb2 as google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2, -) -from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2 -from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2 -from google.longrunning import ( - operations_pb2 as google_dot_longrunning_dot_operations__pb2, -) -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 - - -class BigtableInstanceAdminStub(object): - """Service for creating, configuring, and deleting Cloud Bigtable Instances and - Clusters. Provides access to the Instance and Cluster schemas only, not the - tables' metadata or data stored in those tables. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. 
- """ - self.CreateInstance = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateInstance", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateInstanceRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.GetInstance = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetInstance", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetInstanceRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Instance.FromString, - ) - self.ListInstances = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListInstances", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesResponse.FromString, - ) - self.UpdateInstance = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateInstance", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Instance.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Instance.FromString, - ) - self.PartialUpdateInstance = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateInstance", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.PartialUpdateInstanceRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.DeleteInstance = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteInstance", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteInstanceRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - self.CreateCluster = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateCluster", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateClusterRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.GetCluster = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetCluster", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetClusterRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Cluster.FromString, - ) - self.ListClusters = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListClusters", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersResponse.FromString, - ) - self.UpdateCluster = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateCluster", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Cluster.SerializeToString, - 
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.DeleteCluster = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteCluster", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteClusterRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - self.CreateAppProfile = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateAppProfile", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateAppProfileRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.AppProfile.FromString, - ) - self.GetAppProfile = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetAppProfile", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetAppProfileRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.AppProfile.FromString, - ) - self.ListAppProfiles = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListAppProfiles", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesResponse.FromString, - ) - self.UpdateAppProfile = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateAppProfile", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.UpdateAppProfileRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.DeleteAppProfile = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteAppProfile", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteAppProfileRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - self.GetIamPolicy = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetIamPolicy", - request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.SerializeToString, - response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, - ) - self.SetIamPolicy = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/SetIamPolicy", - request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.SerializeToString, - response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, - ) - self.TestIamPermissions = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/TestIamPermissions", - request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.SerializeToString, - response_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString, - ) - - -class BigtableInstanceAdminServicer(object): - """Service for creating, configuring, and deleting Cloud Bigtable Instances and - Clusters. Provides access to the Instance and Cluster schemas only, not the - tables' metadata or data stored in those tables. 
- """ - - def CreateInstance(self, request, context): - """Create an instance within a project.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetInstance(self, request, context): - """Gets information about an instance.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListInstances(self, request, context): - """Lists information about instances in a project.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def UpdateInstance(self, request, context): - """Updates an instance within a project. This method updates only the display - name and type for an Instance. To update other Instance properties, such as - labels, use PartialUpdateInstance. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def PartialUpdateInstance(self, request, context): - """Partially updates an instance within a project. This method can modify all - fields of an Instance and is the preferred way to update an Instance. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeleteInstance(self, request, context): - """Delete an instance from a project.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def CreateCluster(self, request, context): - """Creates a cluster within an instance.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetCluster(self, request, context): - """Gets information about a cluster.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListClusters(self, request, context): - """Lists information about clusters in an instance.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def UpdateCluster(self, request, context): - """Updates a cluster within an instance.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeleteCluster(self, request, context): - """Deletes a cluster from an instance.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def CreateAppProfile(self, request, context): - """Creates an app profile within an instance.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetAppProfile(self, request, context): - """Gets information about an app profile.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListAppProfiles(self, request, 
context): - """Lists information about app profiles in an instance.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def UpdateAppProfile(self, request, context): - """Updates an app profile within an instance.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeleteAppProfile(self, request, context): - """Deletes an app profile from an instance.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetIamPolicy(self, request, context): - """Gets the access control policy for an instance resource. Returns an empty - policy if an instance exists but does not have a policy set. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def SetIamPolicy(self, request, context): - """Sets the access control policy on an instance resource. Replaces any - existing policy. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def TestIamPermissions(self, request, context): - """Returns permissions that the caller has on the specified instance resource.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - -def add_BigtableInstanceAdminServicer_to_server(servicer, server): - rpc_method_handlers = { - "CreateInstance": grpc.unary_unary_rpc_method_handler( - servicer.CreateInstance, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateInstanceRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "GetInstance": grpc.unary_unary_rpc_method_handler( - servicer.GetInstance, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetInstanceRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Instance.SerializeToString, - ), - "ListInstances": grpc.unary_unary_rpc_method_handler( - servicer.ListInstances, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesResponse.SerializeToString, - ), - "UpdateInstance": grpc.unary_unary_rpc_method_handler( - servicer.UpdateInstance, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Instance.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Instance.SerializeToString, - ), - "PartialUpdateInstance": grpc.unary_unary_rpc_method_handler( - servicer.PartialUpdateInstance, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.PartialUpdateInstanceRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "DeleteInstance": grpc.unary_unary_rpc_method_handler( - 
servicer.DeleteInstance, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteInstanceRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - "CreateCluster": grpc.unary_unary_rpc_method_handler( - servicer.CreateCluster, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateClusterRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "GetCluster": grpc.unary_unary_rpc_method_handler( - servicer.GetCluster, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetClusterRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Cluster.SerializeToString, - ), - "ListClusters": grpc.unary_unary_rpc_method_handler( - servicer.ListClusters, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersResponse.SerializeToString, - ), - "UpdateCluster": grpc.unary_unary_rpc_method_handler( - servicer.UpdateCluster, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Cluster.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "DeleteCluster": grpc.unary_unary_rpc_method_handler( - servicer.DeleteCluster, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteClusterRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - "CreateAppProfile": grpc.unary_unary_rpc_method_handler( - servicer.CreateAppProfile, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateAppProfileRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.AppProfile.SerializeToString, - ), - "GetAppProfile": grpc.unary_unary_rpc_method_handler( - servicer.GetAppProfile, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetAppProfileRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.AppProfile.SerializeToString, - ), - "ListAppProfiles": grpc.unary_unary_rpc_method_handler( - servicer.ListAppProfiles, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesResponse.SerializeToString, - ), - "UpdateAppProfile": grpc.unary_unary_rpc_method_handler( - servicer.UpdateAppProfile, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.UpdateAppProfileRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "DeleteAppProfile": grpc.unary_unary_rpc_method_handler( - servicer.DeleteAppProfile, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteAppProfileRequest.FromString, 
- response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - "GetIamPolicy": grpc.unary_unary_rpc_method_handler( - servicer.GetIamPolicy, - request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.FromString, - response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString, - ), - "SetIamPolicy": grpc.unary_unary_rpc_method_handler( - servicer.SetIamPolicy, - request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.FromString, - response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString, - ), - "TestIamPermissions": grpc.unary_unary_rpc_method_handler( - servicer.TestIamPermissions, - request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.FromString, - response_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - "google.bigtable.admin.v2.BigtableInstanceAdmin", rpc_method_handlers - ) - server.add_generic_rpc_handlers((generic_handler,)) - - -# This class is part of an EXPERIMENTAL API. -class BigtableInstanceAdmin(object): - """Service for creating, configuring, and deleting Cloud Bigtable Instances and - Clusters. Provides access to the Instance and Cluster schemas only, not the - tables' metadata or data stored in those tables. - """ - - @staticmethod - def CreateInstance( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateInstance", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateInstanceRequest.SerializeToString, - google_dot_longrunning_dot_operations__pb2.Operation.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def GetInstance( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetInstance", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetInstanceRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Instance.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def ListInstances( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListInstances", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def UpdateInstance( - request, - target, - options=(), - channel_credentials=None, - 
call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateInstance", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Instance.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Instance.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def PartialUpdateInstance( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateInstance", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.PartialUpdateInstanceRequest.SerializeToString, - google_dot_longrunning_dot_operations__pb2.Operation.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def DeleteInstance( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteInstance", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteInstanceRequest.SerializeToString, - google_dot_protobuf_dot_empty__pb2.Empty.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def CreateCluster( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateCluster", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateClusterRequest.SerializeToString, - google_dot_longrunning_dot_operations__pb2.Operation.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def GetCluster( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetCluster", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetClusterRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Cluster.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def ListClusters( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListClusters", - 
google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def UpdateCluster( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateCluster", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Cluster.SerializeToString, - google_dot_longrunning_dot_operations__pb2.Operation.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def DeleteCluster( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteCluster", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteClusterRequest.SerializeToString, - google_dot_protobuf_dot_empty__pb2.Empty.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def CreateAppProfile( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateAppProfile", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateAppProfileRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.AppProfile.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def GetAppProfile( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetAppProfile", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetAppProfileRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.AppProfile.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def ListAppProfiles( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListAppProfiles", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesResponse.FromString, 
- options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def UpdateAppProfile( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateAppProfile", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.UpdateAppProfileRequest.SerializeToString, - google_dot_longrunning_dot_operations__pb2.Operation.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def DeleteAppProfile( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteAppProfile", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteAppProfileRequest.SerializeToString, - google_dot_protobuf_dot_empty__pb2.Empty.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def GetIamPolicy( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetIamPolicy", - google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.SerializeToString, - google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def SetIamPolicy( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/SetIamPolicy", - google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.SerializeToString, - google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def TestIamPermissions( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/TestIamPermissions", - google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.SerializeToString, - google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) diff --git a/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto b/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto index 6f434a473..d979dba59 100644 --- a/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto +++ b/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto @@ -72,8 
+72,7 @@ service BigtableTableAdmin { // feature might be changed in backward-incompatible ways and is not // recommended for production use. It is not subject to any SLA or deprecation // policy. - rpc CreateTableFromSnapshot(CreateTableFromSnapshotRequest) - returns (google.longrunning.Operation) { + rpc CreateTableFromSnapshot(CreateTableFromSnapshotRequest) returns (google.longrunning.Operation) { option (google.api.http) = { post: "/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot" body: "*" @@ -135,8 +134,7 @@ service BigtableTableAdmin { // CheckConsistency to check whether mutations to the table that finished // before this call started have been replicated. The tokens will be available // for 90 days. - rpc GenerateConsistencyToken(GenerateConsistencyTokenRequest) - returns (GenerateConsistencyTokenResponse) { + rpc GenerateConsistencyToken(GenerateConsistencyTokenRequest) returns (GenerateConsistencyTokenResponse) { option (google.api.http) = { post: "/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken" body: "*" @@ -147,8 +145,7 @@ service BigtableTableAdmin { // Checks replication consistency based on a consistency token, that is, if // replication has caught up based on the conditions specified in the token // and the check request. - rpc CheckConsistency(CheckConsistencyRequest) - returns (CheckConsistencyResponse) { + rpc CheckConsistency(CheckConsistencyRequest) returns (CheckConsistencyResponse) { option (google.api.http) = { post: "/v2/{name=projects/*/instances/*/tables/*}:checkConsistency" body: "*" @@ -164,14 +161,12 @@ service BigtableTableAdmin { // feature might be changed in backward-incompatible ways and is not // recommended for production use. It is not subject to any SLA or deprecation // policy. - rpc SnapshotTable(SnapshotTableRequest) - returns (google.longrunning.Operation) { + rpc SnapshotTable(SnapshotTableRequest) returns (google.longrunning.Operation) { option (google.api.http) = { post: "/v2/{name=projects/*/instances/*/tables/*}:snapshot" body: "*" }; - option (google.api.method_signature) = - "name,cluster,snapshot_id,description"; + option (google.api.method_signature) = "name,cluster,snapshot_id,description"; option (google.longrunning.operation_info) = { response_type: "Snapshot" metadata_type: "SnapshotTableMetadata" @@ -220,24 +215,24 @@ service BigtableTableAdmin { option (google.api.method_signature) = "name"; } - // Starts creating a new Cloud Bigtable Backup. The returned backup + // Starts creating a new Cloud Bigtable Backup. The returned backup // [long-running operation][google.longrunning.Operation] can be used to // track creation of the backup. The // [metadata][google.longrunning.Operation.metadata] field type is // [CreateBackupMetadata][google.bigtable.admin.v2.CreateBackupMetadata]. The // [response][google.longrunning.Operation.response] field type is - // [Backup][google.bigtable.admin.v2.Backup], if successful. Cancelling the - // returned operation will stop the creation and delete the backup. + // [Backup][google.bigtable.admin.v2.Backup], if successful. Cancelling the returned operation will stop the + // creation and delete the backup. 
rpc CreateBackup(CreateBackupRequest) returns (google.longrunning.Operation) { option (google.api.http) = { post: "/v2/{parent=projects/*/instances/*/clusters/*}/backups" body: "backup" }; + option (google.api.method_signature) = "parent,backup_id,backup"; option (google.longrunning.operation_info) = { response_type: "Backup" metadata_type: "CreateBackupMetadata" }; - option (google.api.method_signature) = "parent,backup_id,backup"; } // Gets metadata on a pending or completed Cloud Bigtable Backup. @@ -275,11 +270,11 @@ service BigtableTableAdmin { } // Create a new table by restoring from a completed backup. The new table - // must be in the same instance as the instance containing the backup. The + // must be in the same instance as the instance containing the backup. The // returned table [long-running operation][google.longrunning.Operation] can - // be used to track the progress of the operation, and to cancel it. The + // be used to track the progress of the operation, and to cancel it. The // [metadata][google.longrunning.Operation.metadata] field type is - // [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. The + // [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. The // [response][google.longrunning.Operation.response] type is // [Table][google.bigtable.admin.v2.Table], if successful. rpc RestoreTable(RestoreTableRequest) returns (google.longrunning.Operation) { @@ -293,22 +288,24 @@ service BigtableTableAdmin { }; } - // Gets the access control policy for a resource. + // Gets the access control policy for a Table or Backup resource. // Returns an empty policy if the resource exists but does not have a policy // set. - rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest) - returns (google.iam.v1.Policy) { + rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest) returns (google.iam.v1.Policy) { option (google.api.http) = { post: "/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy" body: "*" + additional_bindings { + post: "/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:getIamPolicy" + body: "*" + } }; option (google.api.method_signature) = "resource"; } // Sets the access control policy on a Table or Backup resource. // Replaces any existing policy. - rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest) - returns (google.iam.v1.Policy) { + rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest) returns (google.iam.v1.Policy) { option (google.api.http) = { post: "/v2/{resource=projects/*/instances/*/tables/*}:setIamPolicy" body: "*" @@ -320,9 +317,8 @@ service BigtableTableAdmin { option (google.api.method_signature) = "resource,policy"; } - // Returns permissions that the caller has on the specified table resource. - rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest) - returns (google.iam.v1.TestIamPermissionsResponse) { + // Returns permissions that the caller has on the specified Table or Backup resource. + rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest) returns (google.iam.v1.TestIamPermissionsResponse) { option (google.api.http) = { post: "/v2/{resource=projects/*/instances/*/tables/*}:testIamPermissions" body: "*" @@ -335,6 +331,78 @@ service BigtableTableAdmin { } } +// The request for +// [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. +message RestoreTableRequest { + // Required. The name of the instance in which to create the restored + // table. This instance must be the parent of the source backup. Values are + // of the form `projects/<project>/instances/<instance>`.
string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Instance" + } + ]; + + // Required. The id of the table to create and restore to. This + // table must not already exist. The `table_id` appended to + // `parent` forms the full table name of the form + // `projects/<project>/instances/<instance>/tables/<table_id>`. + string table_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The source from which to restore. + oneof source { + // Name of the backup from which to restore. Values are of the form + // `projects/<project>/instances/<instance>/clusters/<cluster>/backups/<backup>`. + string backup = 3 [(google.api.resource_reference) = { + type: "bigtable.googleapis.com/Backup" + }]; + } +} + +// Metadata type for the long-running operation returned by +// [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. +message RestoreTableMetadata { + // Name of the table being created and restored to. + string name = 1; + + // The type of the restore source. + RestoreSourceType source_type = 2; + + // Information about the source used to restore the table, as specified by + // `source` in [RestoreTableRequest][google.bigtable.admin.v2.RestoreTableRequest]. + oneof source_info { + BackupInfo backup_info = 3; + } + + // If exists, the name of the long-running operation that will be used to + // track the post-restore optimization process to optimize the performance of + // the restored table. The metadata type of the long-running operation is + // [OptimizeRestoredTableMetadata][]. The response type is + // [Empty][google.protobuf.Empty]. This long-running operation may be + // automatically created by the system if applicable after the + // RestoreTable long-running operation completes successfully. This operation + // may not be created if the table is already optimized or the restore was + // not successful. + string optimize_table_operation_name = 4; + + // The progress of the [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable] + // operation. + OperationProgress progress = 5; +} + +// Metadata type for the long-running operation used to track the progress +// of optimizations performed on a newly restored table. This long-running +// operation is automatically created by the system after the successful +// completion of a table restore, and cannot be cancelled. +message OptimizeRestoredTableMetadata { + // Name of the restored table being optimized. + string name = 1; + + // The progress of the post-restore optimizations. + OperationProgress progress = 2; +} + // Request message for // [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] message CreateTableRequest { @@ -353,8 +421,8 @@ message CreateTableRequest { } ]; - // Required. The name by which the new table should be referred to within the - // parent instance, e.g., `foobar` rather than `{parent}/tables/foobar`. + // Required. The name by which the new table should be referred to within the parent + // instance, e.g., `foobar` rather than `{parent}/tables/foobar`. // Maximum 50 characters. string table_id = 2 [(google.api.field_behavior) = REQUIRED]; @@ -397,13 +465,13 @@ message CreateTableFromSnapshotRequest { } ]; - // Required. The name by which the new table should be referred to within the - // parent instance, e.g., `foobar` rather than `{parent}/tables/foobar`. + // Required.
The name by which the new table should be referred to within the parent + // instance, e.g., `foobar` rather than `{parent}/tables/foobar`. string table_id = 2 [(google.api.field_behavior) = REQUIRED]; - // Required. The unique name of the snapshot from which to restore the table. - // The snapshot and the table must be in the same instance. Values are of the - // form + // Required. The unique name of the snapshot from which to restore the table. The + // snapshot and the table must be in the same instance. + // Values are of the form // `projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}`. string source_snapshot = 3 [ (google.api.field_behavior) = REQUIRED, @@ -421,7 +489,9 @@ message DropRowRangeRequest { // `projects/{project}/instances/{instance}/tables/{table}`. string name = 1 [ (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "bigtable.googleapis.com/Table" } + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Table" + } ]; // Delete all rows or by prefix. @@ -438,8 +508,8 @@ message DropRowRangeRequest { // Request message for // [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] message ListTablesRequest { - // Required. The unique name of the instance for which tables should be - // listed. Values are of the form `projects/{project}/instances/{instance}`. + // Required. The unique name of the instance for which tables should be listed. + // Values are of the form `projects/{project}/instances/{instance}`. string parent = 1 [ (google.api.field_behavior) = REQUIRED, (google.api.resource_reference) = { @@ -486,7 +556,9 @@ message GetTableRequest { // `projects/{project}/instances/{instance}/tables/{table}`. string name = 1 [ (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "bigtable.googleapis.com/Table" } + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Table" + } ]; // The view to be applied to the returned table's fields. @@ -502,7 +574,9 @@ message DeleteTableRequest { // `projects/{project}/instances/{instance}/tables/{table}`. string name = 1 [ (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "bigtable.googleapis.com/Table" } + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Table" + } ]; } @@ -535,26 +609,29 @@ message ModifyColumnFamiliesRequest { // `projects/{project}/instances/{instance}/tables/{table}`. string name = 1 [ (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "bigtable.googleapis.com/Table" } + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Table" + } ]; - // Required. Modifications to be atomically applied to the specified table's - // families. Entries are applied in order, meaning that earlier modifications - // can be masked by later ones (in the case of repeated updates to the same - // family, for example). - repeated Modification modifications = 2 - [(google.api.field_behavior) = REQUIRED]; + // Required. Modifications to be atomically applied to the specified table's families. + // Entries are applied in order, meaning that earlier modifications can be + // masked by later ones (in the case of repeated updates to the same family, + // for example). 
+ repeated Modification modifications = 2 [(google.api.field_behavior) = REQUIRED]; } // Request message for // [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] message GenerateConsistencyTokenRequest { - // Required. The unique name of the Table for which to create a consistency - // token. Values are of the form + // Required. The unique name of the Table for which to create a consistency token. + // Values are of the form // `projects/{project}/instances/{instance}/tables/{table}`. string name = 1 [ (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "bigtable.googleapis.com/Table" } + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Table" + } ]; } @@ -568,12 +645,14 @@ message GenerateConsistencyTokenResponse { // Request message for // [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] message CheckConsistencyRequest { - // Required. The unique name of the Table for which to check replication - // consistency. Values are of the form + // Required. The unique name of the Table for which to check replication consistency. + // Values are of the form // `projects/{project}/instances/{instance}/tables/{table}`. string name = 1 [ (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "bigtable.googleapis.com/Table" } + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Table" + } ]; // Required. The token created using GenerateConsistencyToken for the Table. @@ -601,7 +680,9 @@ message SnapshotTableRequest { // `projects/{project}/instances/{instance}/tables/{table}`. string name = 1 [ (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "bigtable.googleapis.com/Table" } + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Table" + } ]; // Required. The name of the cluster where the snapshot will be created in. @@ -614,9 +695,9 @@ message SnapshotTableRequest { } ]; - // Required. The ID by which the new snapshot should be referred to within the - // parent cluster, e.g., `mysnapshot` of the form: - // `[_a-zA-Z0-9][-_.a-zA-Z0-9]*` rather than + // Required. The ID by which the new snapshot should be referred to within the parent + // cluster, e.g., `mysnapshot` of the form: `[_a-zA-Z0-9][-_.a-zA-Z0-9]*` + // rather than // `projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/mysnapshot`. string snapshot_id = 3 [(google.api.field_behavior) = REQUIRED]; @@ -657,8 +738,8 @@ message GetSnapshotRequest { // feature might be changed in backward-incompatible ways and is not recommended // for production use. It is not subject to any SLA or deprecation policy. message ListSnapshotsRequest { - // Required. The unique name of the cluster for which snapshots should be - // listed. Values are of the form + // Required. The unique name of the cluster for which snapshots should be listed. + // Values are of the form // `projects/{project}/instances/{instance}/clusters/{cluster}`. // Use `{cluster} = '-'` to list snapshots for all clusters in an instance, // e.g., `projects/{project}/instances/{instance}/clusters/-`. @@ -748,8 +829,7 @@ message CreateTableFromSnapshotMetadata { google.protobuf.Timestamp finish_time = 3; } -// The request for -// [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup]. 
+// The request for [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup]. message CreateBackupRequest { // Required. This must be one of the clusters in the instance in which this // table is located. The backup will be stored in this cluster. Values are @@ -789,20 +869,7 @@ message CreateBackupMetadata { google.protobuf.Timestamp end_time = 4; } -// The request for -// [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. -message GetBackupRequest { - // Required. Name of the backup. - // Values are of the form - // `projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}`. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "bigtable.googleapis.com/Backup" } - ]; -} - -// The request for -// [UpdateBackup][google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup]. +// The request for [UpdateBackup][google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup]. message UpdateBackupRequest { // Required. The backup to update. `backup.name`, and the fields to be updated // as specified by `update_mask` are required. Other fields are ignored. @@ -815,26 +882,38 @@ message UpdateBackupRequest { // resource, not to the request message. The field mask must always be // specified; this prevents any future fields from being erased accidentally // by clients that do not know about them. - google.protobuf.FieldMask update_mask = 2 - [(google.api.field_behavior) = REQUIRED]; + google.protobuf.FieldMask update_mask = 2 [(google.api.field_behavior) = REQUIRED]; } -// The request for -// [DeleteBackup][google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup]. +// The request for [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. +message GetBackupRequest { + // Required. Name of the backup. + // Values are of the form + // `projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}`. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Backup" + } + ]; +} + +// The request for [DeleteBackup][google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup]. message DeleteBackupRequest { // Required. Name of the backup to delete. // Values are of the form // `projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}`. string name = 1 [ (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "bigtable.googleapis.com/Backup" } + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Backup" + } ]; } -// The request for -// [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. +// The request for [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. message ListBackupsRequest { - // Required. The cluster to list backups from. Values are of the + // Required. The cluster to list backups from. Values are of the // form `projects/{project}/instances/{instance}/clusters/{cluster}`. // Use `{cluster} = '-'` to list backups for all clusters in an instance, // e.g., `projects/{project}/instances/{instance}/clusters/-`. @@ -849,7 +928,7 @@ message ListBackupsRequest { // The expression must specify the field name, a comparison operator, // and the value that you want to use for filtering. The value must be a // string, a number, or a boolean. The comparison operator must be - // <, >, <=, >=, !=, =, or :. Colon ‘:’ represents a HAS operator which is + // <, >, <=, >=, !=, =, or :. 
Colon ':' represents a HAS operator which is // roughly synonymous with equality. Filter rules are case insensitive. // // The fields eligible for filtering are: @@ -880,9 +959,8 @@ message ListBackupsRequest { string filter = 2; // An expression for specifying the sort order of the results of the request. - // The string value should specify one or more fields in - // [Backup][google.bigtable.admin.v2.Backup]. The full syntax is described at - // https://aip.dev/132#ordering. + // The string value should specify one or more fields in [Backup][google.bigtable.admin.v2.Backup]. The full + // syntax is described at https://aip.dev/132#ordering. // // Fields supported are: // * name @@ -907,88 +985,19 @@ message ListBackupsRequest { int32 page_size = 4; // If non-empty, `page_token` should contain a - // [next_page_token][google.bigtable.admin.v2.ListBackupsResponse.next_page_token] - // from a previous - // [ListBackupsResponse][google.bigtable.admin.v2.ListBackupsResponse] to the - // same `parent` and with the same `filter`. + // [next_page_token][google.bigtable.admin.v2.ListBackupsResponse.next_page_token] from a + // previous [ListBackupsResponse][google.bigtable.admin.v2.ListBackupsResponse] to the same `parent` and with the same + // `filter`. string page_token = 5; } -// The response for -// [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. +// The response for [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. message ListBackupsResponse { // The list of matching backups. repeated Backup backups = 1; // `next_page_token` can be sent in a subsequent - // [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups] call - // to fetch more of the matching backups. + // [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups] call to fetch more + // of the matching backups. string next_page_token = 2; } - -// The request for -// [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. -message RestoreTableRequest { - // Required. The name of the instance in which to create the restored - // table. This instance must be the parent of the source backup. Values are - // of the form `projects/<project>/instances/<instance>`. - string parent = 1; - - // Required. The id of the table to create and restore to. This - // table must not already exist. The `table_id` appended to - // `parent` forms the full table name of the form - // `projects/<project>/instances/<instance>/tables/<table_id>`. - string table_id = 2; - - // Required. The source from which to restore. - oneof source { - // Name of the backup from which to restore. Values are of the form - // `projects/<project>/instances/<instance>/clusters/<cluster>/backups/<backup>`. - string backup = 3; - } -} - -// Metadata type for the long-running operation returned by -// [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. -message RestoreTableMetadata { - // Name of the table being created and restored to. - string name = 1; - - // The type of the restore source. - RestoreSourceType source_type = 2; - - // Information about the source used to restore the table, as specified by - // `source` in - // [RestoreTableRequest][google.bigtable.admin.v2.RestoreTableRequest]. - oneof source_info { - BackupInfo backup_info = 3; - } - - // If exists, the name of the long-running operation that will be used to - // track the post-restore optimization process to optimize the performance of - // the restored table. The metadata type of the long-running operation is - // [OptimizeRestoredTableMetadata][]. 
The response type is - // [Empty][google.protobuf.Empty]. This long-running operation may be - // automatically created by the system if applicable after the - // RestoreTable long-running operation completes successfully. This operation - // may not be created if the table is already optimized or the restore was - // not successful. - string optimize_table_operation_name = 4; - - // The progress of the - // [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable] - // operation. - OperationProgress progress = 5; -} - -// Metadata type for the long-running operation used to track the progress -// of optimizations performed on a newly restored table. This long-running -// operation is automatically created by the system after the successful -// completion of a table restore, and cannot be cancelled. -message OptimizeRestoredTableMetadata { - // Name of the restored table being optimized. - string name = 1; - - // The progress of the post-restore optimizations. - OperationProgress progress = 2; -} diff --git a/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py b/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py deleted file mode 100644 index c7094eac2..000000000 --- a/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py +++ /dev/null @@ -1,3578 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto -"""Generated protocol buffer code.""" -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.api import client_pb2 as google_dot_api_dot_client__pb2 -from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.cloud.bigtable_admin_v2.proto import ( - common_pb2 as google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_common__pb2, -) -from google.cloud.bigtable_admin_v2.proto import ( - table_pb2 as google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2, -) -from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2 -from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2 -from google.longrunning import ( - operations_pb2 as google_dot_longrunning_dot_operations__pb2, -) -from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 -from google.protobuf import field_mask_pb2 as google_dot_protobuf_dot_field__mask__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto", - package="google.bigtable.admin.v2", - syntax="proto3", - serialized_options=b'\n\034com.google.bigtable.admin.v2B\027BigtableTableAdminProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2\352\002"Google::Cloud::Bigtable::Admin::V2', - create_key=_descriptor._internal_create_key, - 
serialized_pb=b'\n?google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x31google/cloud/bigtable_admin_v2/proto/common.proto\x1a\x30google/cloud/bigtable_admin_v2/proto/table.proto\x1a\x1egoogle/iam/v1/iam_policy.proto\x1a\x1agoogle/iam/v1/policy.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xfc\x01\n\x12\x43reateTableRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x15\n\x08table_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x33\n\x05table\x18\x03 \x01(\x0b\x32\x1f.google.bigtable.admin.v2.TableB\x03\xe0\x41\x02\x12J\n\x0einitial_splits\x18\x04 \x03(\x0b\x32\x32.google.bigtable.admin.v2.CreateTableRequest.Split\x1a\x14\n\x05Split\x12\x0b\n\x03key\x18\x01 \x01(\x0c"\xb4\x01\n\x1e\x43reateTableFromSnapshotRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x15\n\x08table_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x41\n\x0fsource_snapshot\x18\x03 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Snapshot"\x94\x01\n\x13\x44ropRowRangeRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x18\n\x0erow_key_prefix\x18\x02 \x01(\x0cH\x00\x12$\n\x1a\x64\x65lete_all_data_from_table\x18\x03 \x01(\x08H\x00\x42\x08\n\x06target"\xa8\x01\n\x11ListTablesRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t"^\n\x12ListTablesResponse\x12/\n\x06tables\x18\x01 \x03(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"z\n\x0fGetTableRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View"I\n\x12\x44\x65leteTableRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table"\xda\x02\n\x1bModifyColumnFamiliesRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12^\n\rmodifications\x18\x02 \x03(\x0b\x32\x42.google.bigtable.admin.v2.ModifyColumnFamiliesRequest.ModificationB\x03\xe0\x41\x02\x1a\xa5\x01\n\x0cModification\x12\n\n\x02id\x18\x01 \x01(\t\x12\x38\n\x06\x63reate\x18\x02 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x38\n\x06update\x18\x03 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x0e\n\x04\x64rop\x18\x04 \x01(\x08H\x00\x42\x05\n\x03mod"V\n\x1fGenerateConsistencyTokenRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table"=\n GenerateConsistencyTokenResponse\x12\x19\n\x11\x63onsistency_token\x18\x01 \x01(\t"n\n\x17\x43heckConsistencyRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x1e\n\x11\x63onsistency_token\x18\x02 \x01(\tB\x03\xe0\x41\x02".\n\x18\x43heckConsistencyResponse\x12\x12\n\nconsistent\x18\x01 \x01(\x08"\xdc\x01\n\x14SnapshotTableRequest\x12\x33\n\x04name\x18\x01 
\x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x38\n\x07\x63luster\x18\x02 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster\x12\x18\n\x0bsnapshot_id\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12&\n\x03ttl\x18\x04 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x13\n\x0b\x64\x65scription\x18\x05 \x01(\t"L\n\x12GetSnapshotRequest\x12\x36\n\x04name\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Snapshot"v\n\x14ListSnapshotsRequest\x12\x37\n\x06parent\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t"g\n\x15ListSnapshotsResponse\x12\x35\n\tsnapshots\x18\x01 \x03(\x0b\x32".google.bigtable.admin.v2.Snapshot\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"O\n\x15\x44\x65leteSnapshotRequest\x12\x36\n\x04name\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Snapshot"\xc4\x01\n\x15SnapshotTableMetadata\x12H\n\x10original_request\x18\x01 \x01(\x0b\x32..google.bigtable.admin.v2.SnapshotTableRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xd8\x01\n\x1f\x43reateTableFromSnapshotMetadata\x12R\n\x10original_request\x18\x01 \x01(\x0b\x32\x38.google.bigtable.admin.v2.CreateTableFromSnapshotRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\x9d\x01\n\x13\x43reateBackupRequest\x12\x37\n\x06parent\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster\x12\x16\n\tbackup_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x35\n\x06\x62\x61\x63kup\x18\x03 \x01(\x0b\x32 .google.bigtable.admin.v2.BackupB\x03\xe0\x41\x02"\x98\x01\n\x14\x43reateBackupMetadata\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0csource_table\x18\x02 \x01(\t\x12.\n\nstart_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"H\n\x10GetBackupRequest\x12\x34\n\x04name\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1e\x62igtable.googleapis.com/Backup"\x82\x01\n\x13UpdateBackupRequest\x12\x35\n\x06\x62\x61\x63kup\x18\x01 \x01(\x0b\x32 .google.bigtable.admin.v2.BackupB\x03\xe0\x41\x02\x12\x34\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02"K\n\x13\x44\x65leteBackupRequest\x12\x34\n\x04name\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1e\x62igtable.googleapis.com/Backup"\x96\x01\n\x12ListBackupsRequest\x12\x37\n\x06parent\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster\x12\x0e\n\x06\x66ilter\x18\x02 \x01(\t\x12\x10\n\x08order_by\x18\x03 \x01(\t\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x05 \x01(\t"a\n\x13ListBackupsResponse\x12\x31\n\x07\x62\x61\x63kups\x18\x01 \x03(\x0b\x32 .google.bigtable.admin.v2.Backup\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"S\n\x13RestoreTableRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x10\n\x08table_id\x18\x02 \x01(\t\x12\x10\n\x06\x62\x61\x63kup\x18\x03 \x01(\tH\x00\x42\x08\n\x06source"\x98\x02\n\x14RestoreTableMetadata\x12\x0c\n\x04name\x18\x01 \x01(\t\x12@\n\x0bsource_type\x18\x02 \x01(\x0e\x32+.google.bigtable.admin.v2.RestoreSourceType\x12;\n\x0b\x62\x61\x63kup_info\x18\x03 \x01(\x0b\x32$.google.bigtable.admin.v2.BackupInfoH\x00\x12%\n\x1doptimize_table_operation_name\x18\x04 \x01(\t\x12=\n\x08progress\x18\x05 
\x01(\x0b\x32+.google.bigtable.admin.v2.OperationProgressB\r\n\x0bsource_info"l\n\x1dOptimizeRestoredTableMetadata\x12\x0c\n\x04name\x18\x01 \x01(\t\x12=\n\x08progress\x18\x02 \x01(\x0b\x32+.google.bigtable.admin.v2.OperationProgress2\xc8$\n\x12\x42igtableTableAdmin\x12\xab\x01\n\x0b\x43reateTable\x12,.google.bigtable.admin.v2.CreateTableRequest\x1a\x1f.google.bigtable.admin.v2.Table"M\x82\xd3\xe4\x93\x02/"*/v2/{parent=projects/*/instances/*}/tables:\x01*\xda\x41\x15parent,table_id,table\x12\x8a\x02\n\x17\x43reateTableFromSnapshot\x12\x38.google.bigtable.admin.v2.CreateTableFromSnapshotRequest\x1a\x1d.google.longrunning.Operation"\x95\x01\x82\xd3\xe4\x93\x02\x42"=/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot:\x01*\xda\x41\x1fparent,table_id,source_snapshot\xca\x41(\n\x05Table\x12\x1f\x43reateTableFromSnapshotMetadata\x12\xa4\x01\n\nListTables\x12+.google.bigtable.admin.v2.ListTablesRequest\x1a,.google.bigtable.admin.v2.ListTablesResponse";\x82\xd3\xe4\x93\x02,\x12*/v2/{parent=projects/*/instances/*}/tables\xda\x41\x06parent\x12\x91\x01\n\x08GetTable\x12).google.bigtable.admin.v2.GetTableRequest\x1a\x1f.google.bigtable.admin.v2.Table"9\x82\xd3\xe4\x93\x02,\x12*/v2/{name=projects/*/instances/*/tables/*}\xda\x41\x04name\x12\x8e\x01\n\x0b\x44\x65leteTable\x12,.google.bigtable.admin.v2.DeleteTableRequest\x1a\x16.google.protobuf.Empty"9\x82\xd3\xe4\x93\x02,**/v2/{name=projects/*/instances/*/tables/*}\xda\x41\x04name\x12\xcf\x01\n\x14ModifyColumnFamilies\x12\x35.google.bigtable.admin.v2.ModifyColumnFamiliesRequest\x1a\x1f.google.bigtable.admin.v2.Table"_\x82\xd3\xe4\x93\x02\x44"?/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies:\x01*\xda\x41\x12name,modifications\x12\x99\x01\n\x0c\x44ropRowRange\x12-.google.bigtable.admin.v2.DropRowRangeRequest\x1a\x16.google.protobuf.Empty"B\x82\xd3\xe4\x93\x02<"7/v2/{name=projects/*/instances/*/tables/*}:dropRowRange:\x01*\x12\xe8\x01\n\x18GenerateConsistencyToken\x12\x39.google.bigtable.admin.v2.GenerateConsistencyTokenRequest\x1a:.google.bigtable.admin.v2.GenerateConsistencyTokenResponse"U\x82\xd3\xe4\x93\x02H"C/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken:\x01*\xda\x41\x04name\x12\xda\x01\n\x10\x43heckConsistency\x12\x31.google.bigtable.admin.v2.CheckConsistencyRequest\x1a\x32.google.bigtable.admin.v2.CheckConsistencyResponse"_\x82\xd3\xe4\x93\x02@";/v2/{name=projects/*/instances/*/tables/*}:checkConsistency:\x01*\xda\x41\x16name,consistency_token\x12\xea\x01\n\rSnapshotTable\x12..google.bigtable.admin.v2.SnapshotTableRequest\x1a\x1d.google.longrunning.Operation"\x89\x01\x82\xd3\xe4\x93\x02\x38"3/v2/{name=projects/*/instances/*/tables/*}:snapshot:\x01*\xda\x41$name,cluster,snapshot_id,description\xca\x41!\n\x08Snapshot\x12\x15SnapshotTableMetadata\x12\xa8\x01\n\x0bGetSnapshot\x12,.google.bigtable.admin.v2.GetSnapshotRequest\x1a".google.bigtable.admin.v2.Snapshot"G\x82\xd3\xe4\x93\x02:\x12\x38/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}\xda\x41\x04name\x12\xbb\x01\n\rListSnapshots\x12..google.bigtable.admin.v2.ListSnapshotsRequest\x1a/.google.bigtable.admin.v2.ListSnapshotsResponse"I\x82\xd3\xe4\x93\x02:\x12\x38/v2/{parent=projects/*/instances/*/clusters/*}/snapshots\xda\x41\x06parent\x12\xa2\x01\n\x0e\x44\x65leteSnapshot\x12/.google.bigtable.admin.v2.DeleteSnapshotRequest\x1a\x16.google.protobuf.Empty"G\x82\xd3\xe4\x93\x02:*8/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}\xda\x41\x04name\x12\xe0\x01\n\x0c\x43reateBackup\x12-.google.bigtable.admin.v2.CreateBackupRequest\x1a\x1d.google.lon
grunning.Operation"\x81\x01\x82\xd3\xe4\x93\x02@"6/v2/{parent=projects/*/instances/*/clusters/*}/backups:\x06\x62\x61\x63kup\xca\x41\x1e\n\x06\x42\x61\x63kup\x12\x14\x43reateBackupMetadata\xda\x41\x17parent,backup_id,backup\x12\xa0\x01\n\tGetBackup\x12*.google.bigtable.admin.v2.GetBackupRequest\x1a .google.bigtable.admin.v2.Backup"E\x82\xd3\xe4\x93\x02\x38\x12\x36/v2/{name=projects/*/instances/*/clusters/*/backups/*}\xda\x41\x04name\x12\xc3\x01\n\x0cUpdateBackup\x12-.google.bigtable.admin.v2.UpdateBackupRequest\x1a .google.bigtable.admin.v2.Backup"b\x82\xd3\xe4\x93\x02G2=/v2/{backup.name=projects/*/instances/*/clusters/*/backups/*}:\x06\x62\x61\x63kup\xda\x41\x12\x62\x61\x63kup,update_mask\x12\x9c\x01\n\x0c\x44\x65leteBackup\x12-.google.bigtable.admin.v2.DeleteBackupRequest\x1a\x16.google.protobuf.Empty"E\x82\xd3\xe4\x93\x02\x38*6/v2/{name=projects/*/instances/*/clusters/*/backups/*}\xda\x41\x04name\x12\xb3\x01\n\x0bListBackups\x12,.google.bigtable.admin.v2.ListBackupsRequest\x1a-.google.bigtable.admin.v2.ListBackupsResponse"G\x82\xd3\xe4\x93\x02\x38\x12\x36/v2/{parent=projects/*/instances/*/clusters/*}/backups\xda\x41\x06parent\x12\xbb\x01\n\x0cRestoreTable\x12-.google.bigtable.admin.v2.RestoreTableRequest\x1a\x1d.google.longrunning.Operation"]\x82\xd3\xe4\x93\x02\x37"2/v2/{parent=projects/*/instances/*}/tables:restore:\x01*\xca\x41\x1d\n\x05Table\x12\x14RestoreTableMetadata\x12\x9c\x01\n\x0cGetIamPolicy\x12".google.iam.v1.GetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"Q\x82\xd3\xe4\x93\x02@";/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy:\x01*\xda\x41\x08resource\x12\xf3\x01\n\x0cSetIamPolicy\x12".google.iam.v1.SetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"\xa7\x01\x82\xd3\xe4\x93\x02\x8e\x01";/v2/{resource=projects/*/instances/*/tables/*}:setIamPolicy:\x01*ZL"G/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:setIamPolicy:\x01*\xda\x41\x0fresource,policy\x12\xa4\x02\n\x12TestIamPermissions\x12(.google.iam.v1.TestIamPermissionsRequest\x1a).google.iam.v1.TestIamPermissionsResponse"\xb8\x01\x82\xd3\xe4\x93\x02\x9a\x01"A/v2/{resource=projects/*/instances/*/tables/*}:testIamPermissions:\x01*ZR"M/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:testIamPermissions:\x01*\xda\x41\x14resource,permissions\x1a\xde\x02\xca\x41\x1c\x62igtableadmin.googleapis.com\xd2\x41\xbb\x02https://www.googleapis.com/auth/bigtable.admin,https://www.googleapis.com/auth/bigtable.admin.table,https://www.googleapis.com/auth/cloud-bigtable.admin,https://www.googleapis.com/auth/cloud-bigtable.admin.table,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-onlyB\xdf\x01\n\x1c\x63om.google.bigtable.admin.v2B\x17\x42igtableTableAdminProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2\xea\x02"Google::Cloud::Bigtable::Admin::V2b\x06proto3', - dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - google_dot_api_dot_client__pb2.DESCRIPTOR, - google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, - google_dot_api_dot_resource__pb2.DESCRIPTOR, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_common__pb2.DESCRIPTOR, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.DESCRIPTOR, - google_dot_iam_dot_v1_dot_iam__policy__pb2.DESCRIPTOR, - google_dot_iam_dot_v1_dot_policy__pb2.DESCRIPTOR, - google_dot_longrunning_dot_operations__pb2.DESCRIPTOR, - google_dot_protobuf_dot_duration__pb2.DESCRIPTOR, - 
google_dot_protobuf_dot_empty__pb2.DESCRIPTOR, - google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR, - google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - ], -) - - -_CREATETABLEREQUEST_SPLIT = _descriptor.Descriptor( - name="Split", - full_name="google.bigtable.admin.v2.CreateTableRequest.Split", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.bigtable.admin.v2.CreateTableRequest.Split.key", - index=0, - number=1, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=767, - serialized_end=787, -) - -_CREATETABLEREQUEST = _descriptor.Descriptor( - name="CreateTableRequest", - full_name="google.bigtable.admin.v2.CreateTableRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.bigtable.admin.v2.CreateTableRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Instance', - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="table_id", - full_name="google.bigtable.admin.v2.CreateTableRequest.table_id", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="table", - full_name="google.bigtable.admin.v2.CreateTableRequest.table", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="initial_splits", - full_name="google.bigtable.admin.v2.CreateTableRequest.initial_splits", - index=3, - number=4, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[ - _CREATETABLEREQUEST_SPLIT, - ], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=535, - serialized_end=787, -) - - -_CREATETABLEFROMSNAPSHOTREQUEST = _descriptor.Descriptor( - name="CreateTableFromSnapshotRequest", - 
full_name="google.bigtable.admin.v2.CreateTableFromSnapshotRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.bigtable.admin.v2.CreateTableFromSnapshotRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Instance', - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="table_id", - full_name="google.bigtable.admin.v2.CreateTableFromSnapshotRequest.table_id", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="source_snapshot", - full_name="google.bigtable.admin.v2.CreateTableFromSnapshotRequest.source_snapshot", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Snapshot', - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=790, - serialized_end=970, -) - - -_DROPROWRANGEREQUEST = _descriptor.Descriptor( - name="DropRowRangeRequest", - full_name="google.bigtable.admin.v2.DropRowRangeRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.DropRowRangeRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="row_key_prefix", - full_name="google.bigtable.admin.v2.DropRowRangeRequest.row_key_prefix", - index=1, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="delete_all_data_from_table", - full_name="google.bigtable.admin.v2.DropRowRangeRequest.delete_all_data_from_table", - index=2, - number=3, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, 
- ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="target", - full_name="google.bigtable.admin.v2.DropRowRangeRequest.target", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=973, - serialized_end=1121, -) - - -_LISTTABLESREQUEST = _descriptor.Descriptor( - name="ListTablesRequest", - full_name="google.bigtable.admin.v2.ListTablesRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.bigtable.admin.v2.ListTablesRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Instance', - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="view", - full_name="google.bigtable.admin.v2.ListTablesRequest.view", - index=1, - number=2, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.bigtable.admin.v2.ListTablesRequest.page_size", - index=2, - number=4, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.bigtable.admin.v2.ListTablesRequest.page_token", - index=3, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1124, - serialized_end=1292, -) - - -_LISTTABLESRESPONSE = _descriptor.Descriptor( - name="ListTablesResponse", - full_name="google.bigtable.admin.v2.ListTablesResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="tables", - full_name="google.bigtable.admin.v2.ListTablesResponse.tables", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.bigtable.admin.v2.ListTablesResponse.next_page_token", - index=1, - number=2, - type=9, - 
cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1294, - serialized_end=1388, -) - - -_GETTABLEREQUEST = _descriptor.Descriptor( - name="GetTableRequest", - full_name="google.bigtable.admin.v2.GetTableRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.GetTableRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="view", - full_name="google.bigtable.admin.v2.GetTableRequest.view", - index=1, - number=2, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1390, - serialized_end=1512, -) - - -_DELETETABLEREQUEST = _descriptor.Descriptor( - name="DeleteTableRequest", - full_name="google.bigtable.admin.v2.DeleteTableRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.DeleteTableRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1514, - serialized_end=1587, -) - - -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION = _descriptor.Descriptor( - name="Modification", - full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="id", - full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.id", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - 
create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="create", - full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.create", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="update", - full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.update", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="drop", - full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.drop", - index=3, - number=4, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="mod", - full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.mod", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=1771, - serialized_end=1936, -) - -_MODIFYCOLUMNFAMILIESREQUEST = _descriptor.Descriptor( - name="ModifyColumnFamiliesRequest", - full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="modifications", - full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.modifications", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[ - _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION, - ], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1590, - serialized_end=1936, -) - - -_GENERATECONSISTENCYTOKENREQUEST = _descriptor.Descriptor( - name="GenerateConsistencyTokenRequest", - full_name="google.bigtable.admin.v2.GenerateConsistencyTokenRequest", - filename=None, - 
file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.GenerateConsistencyTokenRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1938, - serialized_end=2024, -) - - -_GENERATECONSISTENCYTOKENRESPONSE = _descriptor.Descriptor( - name="GenerateConsistencyTokenResponse", - full_name="google.bigtable.admin.v2.GenerateConsistencyTokenResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="consistency_token", - full_name="google.bigtable.admin.v2.GenerateConsistencyTokenResponse.consistency_token", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2026, - serialized_end=2087, -) - - -_CHECKCONSISTENCYREQUEST = _descriptor.Descriptor( - name="CheckConsistencyRequest", - full_name="google.bigtable.admin.v2.CheckConsistencyRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.CheckConsistencyRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="consistency_token", - full_name="google.bigtable.admin.v2.CheckConsistencyRequest.consistency_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2089, - serialized_end=2199, -) - - -_CHECKCONSISTENCYRESPONSE = _descriptor.Descriptor( - name="CheckConsistencyResponse", - full_name="google.bigtable.admin.v2.CheckConsistencyResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - 
_descriptor.FieldDescriptor( - name="consistent", - full_name="google.bigtable.admin.v2.CheckConsistencyResponse.consistent", - index=0, - number=1, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2201, - serialized_end=2247, -) - - -_SNAPSHOTTABLEREQUEST = _descriptor.Descriptor( - name="SnapshotTableRequest", - full_name="google.bigtable.admin.v2.SnapshotTableRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.SnapshotTableRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cluster", - full_name="google.bigtable.admin.v2.SnapshotTableRequest.cluster", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A!\n\037bigtable.googleapis.com/Cluster", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="snapshot_id", - full_name="google.bigtable.admin.v2.SnapshotTableRequest.snapshot_id", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="ttl", - full_name="google.bigtable.admin.v2.SnapshotTableRequest.ttl", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="description", - full_name="google.bigtable.admin.v2.SnapshotTableRequest.description", - index=4, - number=5, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2250, - serialized_end=2470, -) - - -_GETSNAPSHOTREQUEST = _descriptor.Descriptor( - name="GetSnapshotRequest", - 
full_name="google.bigtable.admin.v2.GetSnapshotRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.GetSnapshotRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Snapshot', - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2472, - serialized_end=2548, -) - - -_LISTSNAPSHOTSREQUEST = _descriptor.Descriptor( - name="ListSnapshotsRequest", - full_name="google.bigtable.admin.v2.ListSnapshotsRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.bigtable.admin.v2.ListSnapshotsRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A!\n\037bigtable.googleapis.com/Cluster", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.bigtable.admin.v2.ListSnapshotsRequest.page_size", - index=1, - number=2, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.bigtable.admin.v2.ListSnapshotsRequest.page_token", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2550, - serialized_end=2668, -) - - -_LISTSNAPSHOTSRESPONSE = _descriptor.Descriptor( - name="ListSnapshotsResponse", - full_name="google.bigtable.admin.v2.ListSnapshotsResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="snapshots", - full_name="google.bigtable.admin.v2.ListSnapshotsResponse.snapshots", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - 
full_name="google.bigtable.admin.v2.ListSnapshotsResponse.next_page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2670, - serialized_end=2773, -) - - -_DELETESNAPSHOTREQUEST = _descriptor.Descriptor( - name="DeleteSnapshotRequest", - full_name="google.bigtable.admin.v2.DeleteSnapshotRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.DeleteSnapshotRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Snapshot', - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2775, - serialized_end=2854, -) - - -_SNAPSHOTTABLEMETADATA = _descriptor.Descriptor( - name="SnapshotTableMetadata", - full_name="google.bigtable.admin.v2.SnapshotTableMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="original_request", - full_name="google.bigtable.admin.v2.SnapshotTableMetadata.original_request", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="request_time", - full_name="google.bigtable.admin.v2.SnapshotTableMetadata.request_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="finish_time", - full_name="google.bigtable.admin.v2.SnapshotTableMetadata.finish_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2857, - serialized_end=3053, -) - - -_CREATETABLEFROMSNAPSHOTMETADATA = _descriptor.Descriptor( - name="CreateTableFromSnapshotMetadata", - 
full_name="google.bigtable.admin.v2.CreateTableFromSnapshotMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="original_request", - full_name="google.bigtable.admin.v2.CreateTableFromSnapshotMetadata.original_request", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="request_time", - full_name="google.bigtable.admin.v2.CreateTableFromSnapshotMetadata.request_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="finish_time", - full_name="google.bigtable.admin.v2.CreateTableFromSnapshotMetadata.finish_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3056, - serialized_end=3272, -) - - -_CREATEBACKUPREQUEST = _descriptor.Descriptor( - name="CreateBackupRequest", - full_name="google.bigtable.admin.v2.CreateBackupRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.bigtable.admin.v2.CreateBackupRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A!\n\037bigtable.googleapis.com/Cluster", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="backup_id", - full_name="google.bigtable.admin.v2.CreateBackupRequest.backup_id", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="backup", - full_name="google.bigtable.admin.v2.CreateBackupRequest.backup", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - 
extension_ranges=[], - oneofs=[], - serialized_start=3275, - serialized_end=3432, -) - - -_CREATEBACKUPMETADATA = _descriptor.Descriptor( - name="CreateBackupMetadata", - full_name="google.bigtable.admin.v2.CreateBackupMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.CreateBackupMetadata.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="source_table", - full_name="google.bigtable.admin.v2.CreateBackupMetadata.source_table", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="start_time", - full_name="google.bigtable.admin.v2.CreateBackupMetadata.start_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="end_time", - full_name="google.bigtable.admin.v2.CreateBackupMetadata.end_time", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3435, - serialized_end=3587, -) - - -_GETBACKUPREQUEST = _descriptor.Descriptor( - name="GetBackupRequest", - full_name="google.bigtable.admin.v2.GetBackupRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.GetBackupRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A \n\036bigtable.googleapis.com/Backup", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3589, - serialized_end=3661, -) - - -_UPDATEBACKUPREQUEST = _descriptor.Descriptor( - name="UpdateBackupRequest", - full_name="google.bigtable.admin.v2.UpdateBackupRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - 
_descriptor.FieldDescriptor( - name="backup", - full_name="google.bigtable.admin.v2.UpdateBackupRequest.backup", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="update_mask", - full_name="google.bigtable.admin.v2.UpdateBackupRequest.update_mask", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3664, - serialized_end=3794, -) - - -_DELETEBACKUPREQUEST = _descriptor.Descriptor( - name="DeleteBackupRequest", - full_name="google.bigtable.admin.v2.DeleteBackupRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.DeleteBackupRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A \n\036bigtable.googleapis.com/Backup", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3796, - serialized_end=3871, -) - - -_LISTBACKUPSREQUEST = _descriptor.Descriptor( - name="ListBackupsRequest", - full_name="google.bigtable.admin.v2.ListBackupsRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.bigtable.admin.v2.ListBackupsRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A!\n\037bigtable.googleapis.com/Cluster", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="filter", - full_name="google.bigtable.admin.v2.ListBackupsRequest.filter", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="order_by", - full_name="google.bigtable.admin.v2.ListBackupsRequest.order_by", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - 
containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.bigtable.admin.v2.ListBackupsRequest.page_size", - index=3, - number=4, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.bigtable.admin.v2.ListBackupsRequest.page_token", - index=4, - number=5, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3874, - serialized_end=4024, -) - - -_LISTBACKUPSRESPONSE = _descriptor.Descriptor( - name="ListBackupsResponse", - full_name="google.bigtable.admin.v2.ListBackupsResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="backups", - full_name="google.bigtable.admin.v2.ListBackupsResponse.backups", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.bigtable.admin.v2.ListBackupsResponse.next_page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=4026, - serialized_end=4123, -) - - -_RESTORETABLEREQUEST = _descriptor.Descriptor( - name="RestoreTableRequest", - full_name="google.bigtable.admin.v2.RestoreTableRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.bigtable.admin.v2.RestoreTableRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="table_id", - full_name="google.bigtable.admin.v2.RestoreTableRequest.table_id", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - 
default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="backup", - full_name="google.bigtable.admin.v2.RestoreTableRequest.backup", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="source", - full_name="google.bigtable.admin.v2.RestoreTableRequest.source", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=4125, - serialized_end=4208, -) - - -_RESTORETABLEMETADATA = _descriptor.Descriptor( - name="RestoreTableMetadata", - full_name="google.bigtable.admin.v2.RestoreTableMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.RestoreTableMetadata.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="source_type", - full_name="google.bigtable.admin.v2.RestoreTableMetadata.source_type", - index=1, - number=2, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="backup_info", - full_name="google.bigtable.admin.v2.RestoreTableMetadata.backup_info", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="optimize_table_operation_name", - full_name="google.bigtable.admin.v2.RestoreTableMetadata.optimize_table_operation_name", - index=3, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="progress", - full_name="google.bigtable.admin.v2.RestoreTableMetadata.progress", - index=4, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - 
serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="source_info", - full_name="google.bigtable.admin.v2.RestoreTableMetadata.source_info", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=4211, - serialized_end=4491, -) - - -_OPTIMIZERESTOREDTABLEMETADATA = _descriptor.Descriptor( - name="OptimizeRestoredTableMetadata", - full_name="google.bigtable.admin.v2.OptimizeRestoredTableMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.OptimizeRestoredTableMetadata.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="progress", - full_name="google.bigtable.admin.v2.OptimizeRestoredTableMetadata.progress", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=4493, - serialized_end=4601, -) - -_CREATETABLEREQUEST_SPLIT.containing_type = _CREATETABLEREQUEST -_CREATETABLEREQUEST.fields_by_name[ - "table" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._TABLE -) -_CREATETABLEREQUEST.fields_by_name[ - "initial_splits" -].message_type = _CREATETABLEREQUEST_SPLIT -_DROPROWRANGEREQUEST.oneofs_by_name["target"].fields.append( - _DROPROWRANGEREQUEST.fields_by_name["row_key_prefix"] -) -_DROPROWRANGEREQUEST.fields_by_name[ - "row_key_prefix" -].containing_oneof = _DROPROWRANGEREQUEST.oneofs_by_name["target"] -_DROPROWRANGEREQUEST.oneofs_by_name["target"].fields.append( - _DROPROWRANGEREQUEST.fields_by_name["delete_all_data_from_table"] -) -_DROPROWRANGEREQUEST.fields_by_name[ - "delete_all_data_from_table" -].containing_oneof = _DROPROWRANGEREQUEST.oneofs_by_name["target"] -_LISTTABLESREQUEST.fields_by_name[ - "view" -].enum_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._TABLE_VIEW -) -_LISTTABLESRESPONSE.fields_by_name[ - "tables" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._TABLE -) -_GETTABLEREQUEST.fields_by_name[ - "view" -].enum_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._TABLE_VIEW -) -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name[ - "create" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._COLUMNFAMILY -) -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name[ - "update" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._COLUMNFAMILY -) 
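The ``containing_oneof`` wiring in this hunk is what gives the generated classes their proto3 oneof semantics at runtime: because ``row_key_prefix`` and ``delete_all_data_from_table`` are both registered under the ``target`` oneof of ``DropRowRangeRequest``, assigning one member clears the other. A minimal sketch against the pre-2.0 module being deleted here (the table name is a hypothetical placeholder):

from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2

# Hypothetical resource name, for illustration only.
req = bigtable_table_admin_pb2.DropRowRangeRequest(
    name="projects/my-project/instances/my-instance/tables/my-table",
    row_key_prefix=b"phone#",
)
assert req.WhichOneof("target") == "row_key_prefix"

# Assigning the other oneof member clears row_key_prefix.
req.delete_all_data_from_table = True
assert req.WhichOneof("target") == "delete_all_data_from_table"
assert req.row_key_prefix == b""

The same mechanism backs the ``mod`` oneof of ``ModifyColumnFamiliesRequest.Modification`` wired immediately below.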
-_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.containing_type = _MODIFYCOLUMNFAMILIESREQUEST -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name["mod"].fields.append( - _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name["create"] -) -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name[ - "create" -].containing_oneof = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name["mod"] -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name["mod"].fields.append( - _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name["update"] -) -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name[ - "update" -].containing_oneof = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name["mod"] -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name["mod"].fields.append( - _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name["drop"] -) -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name[ - "drop" -].containing_oneof = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name["mod"] -_MODIFYCOLUMNFAMILIESREQUEST.fields_by_name[ - "modifications" -].message_type = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION -_SNAPSHOTTABLEREQUEST.fields_by_name[ - "ttl" -].message_type = google_dot_protobuf_dot_duration__pb2._DURATION -_LISTSNAPSHOTSRESPONSE.fields_by_name[ - "snapshots" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._SNAPSHOT -) -_SNAPSHOTTABLEMETADATA.fields_by_name[ - "original_request" -].message_type = _SNAPSHOTTABLEREQUEST -_SNAPSHOTTABLEMETADATA.fields_by_name[ - "request_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_SNAPSHOTTABLEMETADATA.fields_by_name[ - "finish_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CREATETABLEFROMSNAPSHOTMETADATA.fields_by_name[ - "original_request" -].message_type = _CREATETABLEFROMSNAPSHOTREQUEST -_CREATETABLEFROMSNAPSHOTMETADATA.fields_by_name[ - "request_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CREATETABLEFROMSNAPSHOTMETADATA.fields_by_name[ - "finish_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CREATEBACKUPREQUEST.fields_by_name[ - "backup" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._BACKUP -) -_CREATEBACKUPMETADATA.fields_by_name[ - "start_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CREATEBACKUPMETADATA.fields_by_name[ - "end_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_UPDATEBACKUPREQUEST.fields_by_name[ - "backup" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._BACKUP -) -_UPDATEBACKUPREQUEST.fields_by_name[ - "update_mask" -].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK -_LISTBACKUPSRESPONSE.fields_by_name[ - "backups" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._BACKUP -) -_RESTORETABLEREQUEST.oneofs_by_name["source"].fields.append( - _RESTORETABLEREQUEST.fields_by_name["backup"] -) -_RESTORETABLEREQUEST.fields_by_name[ - "backup" -].containing_oneof = _RESTORETABLEREQUEST.oneofs_by_name["source"] -_RESTORETABLEMETADATA.fields_by_name[ - "source_type" -].enum_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._RESTORESOURCETYPE -) -_RESTORETABLEMETADATA.fields_by_name[ - "backup_info" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._BACKUPINFO -) -_RESTORETABLEMETADATA.fields_by_name[ - "progress" 
-].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_common__pb2._OPERATIONPROGRESS -) -_RESTORETABLEMETADATA.oneofs_by_name["source_info"].fields.append( - _RESTORETABLEMETADATA.fields_by_name["backup_info"] -) -_RESTORETABLEMETADATA.fields_by_name[ - "backup_info" -].containing_oneof = _RESTORETABLEMETADATA.oneofs_by_name["source_info"] -_OPTIMIZERESTOREDTABLEMETADATA.fields_by_name[ - "progress" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_common__pb2._OPERATIONPROGRESS -) -DESCRIPTOR.message_types_by_name["CreateTableRequest"] = _CREATETABLEREQUEST -DESCRIPTOR.message_types_by_name[ - "CreateTableFromSnapshotRequest" -] = _CREATETABLEFROMSNAPSHOTREQUEST -DESCRIPTOR.message_types_by_name["DropRowRangeRequest"] = _DROPROWRANGEREQUEST -DESCRIPTOR.message_types_by_name["ListTablesRequest"] = _LISTTABLESREQUEST -DESCRIPTOR.message_types_by_name["ListTablesResponse"] = _LISTTABLESRESPONSE -DESCRIPTOR.message_types_by_name["GetTableRequest"] = _GETTABLEREQUEST -DESCRIPTOR.message_types_by_name["DeleteTableRequest"] = _DELETETABLEREQUEST -DESCRIPTOR.message_types_by_name[ - "ModifyColumnFamiliesRequest" -] = _MODIFYCOLUMNFAMILIESREQUEST -DESCRIPTOR.message_types_by_name[ - "GenerateConsistencyTokenRequest" -] = _GENERATECONSISTENCYTOKENREQUEST -DESCRIPTOR.message_types_by_name[ - "GenerateConsistencyTokenResponse" -] = _GENERATECONSISTENCYTOKENRESPONSE -DESCRIPTOR.message_types_by_name["CheckConsistencyRequest"] = _CHECKCONSISTENCYREQUEST -DESCRIPTOR.message_types_by_name["CheckConsistencyResponse"] = _CHECKCONSISTENCYRESPONSE -DESCRIPTOR.message_types_by_name["SnapshotTableRequest"] = _SNAPSHOTTABLEREQUEST -DESCRIPTOR.message_types_by_name["GetSnapshotRequest"] = _GETSNAPSHOTREQUEST -DESCRIPTOR.message_types_by_name["ListSnapshotsRequest"] = _LISTSNAPSHOTSREQUEST -DESCRIPTOR.message_types_by_name["ListSnapshotsResponse"] = _LISTSNAPSHOTSRESPONSE -DESCRIPTOR.message_types_by_name["DeleteSnapshotRequest"] = _DELETESNAPSHOTREQUEST -DESCRIPTOR.message_types_by_name["SnapshotTableMetadata"] = _SNAPSHOTTABLEMETADATA -DESCRIPTOR.message_types_by_name[ - "CreateTableFromSnapshotMetadata" -] = _CREATETABLEFROMSNAPSHOTMETADATA -DESCRIPTOR.message_types_by_name["CreateBackupRequest"] = _CREATEBACKUPREQUEST -DESCRIPTOR.message_types_by_name["CreateBackupMetadata"] = _CREATEBACKUPMETADATA -DESCRIPTOR.message_types_by_name["GetBackupRequest"] = _GETBACKUPREQUEST -DESCRIPTOR.message_types_by_name["UpdateBackupRequest"] = _UPDATEBACKUPREQUEST -DESCRIPTOR.message_types_by_name["DeleteBackupRequest"] = _DELETEBACKUPREQUEST -DESCRIPTOR.message_types_by_name["ListBackupsRequest"] = _LISTBACKUPSREQUEST -DESCRIPTOR.message_types_by_name["ListBackupsResponse"] = _LISTBACKUPSRESPONSE -DESCRIPTOR.message_types_by_name["RestoreTableRequest"] = _RESTORETABLEREQUEST -DESCRIPTOR.message_types_by_name["RestoreTableMetadata"] = _RESTORETABLEMETADATA -DESCRIPTOR.message_types_by_name[ - "OptimizeRestoredTableMetadata" -] = _OPTIMIZERESTOREDTABLEMETADATA -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -CreateTableRequest = _reflection.GeneratedProtocolMessageType( - "CreateTableRequest", - (_message.Message,), - { - "Split": _reflection.GeneratedProtocolMessageType( - "Split", - (_message.Message,), - { - "DESCRIPTOR": _CREATETABLEREQUEST_SPLIT, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """An initial split point for a newly created table. - - Attributes: - key: - Row key to use as an initial tablet boundary. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableRequest.Split) - }, - ), - "DESCRIPTOR": _CREATETABLEREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.Creat - eTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] - - Attributes: - parent: - Required. The unique name of the instance in which to create - the table. Values are of the form - ``projects/{project}/instances/{instance}``. - table_id: - Required. The name by which the new table should be referred - to within the parent instance, e.g., ``foobar`` rather than - ``{parent}/tables/foobar``. Maximum 50 characters. - table: - Required. The Table to create. - initial_splits: - The optional list of row keys that will be used to initially - split the table into several tablets (tablets are similar to - HBase regions). Given two split keys, ``s1`` and ``s2``, three - tablets will be created, spanning the key ranges: ``[, s1), - [s1, s2), [s2, )``. Example: - Row keys := ``["a", "apple", - "custom", "customer_1", "customer_2",`` ``"other", "zz"]`` - - initial_split_keys := ``["apple", "customer_1", - "customer_2", "other"]`` - Key assignment: - Tablet 1 - ``[, apple) => {"a"}.`` - Tablet 2 - ``[apple, customer_1) => {"apple", "custom"}.`` - - Tablet 3 ``[customer_1, customer_2) => {"customer_1"}.`` - - Tablet 4 ``[customer_2, other) => {"customer_2"}.`` - - Tablet 5 ``[other, ) => {"other", "zz"}.`` - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableRequest) - }, -) -_sym_db.RegisterMessage(CreateTableRequest) -_sym_db.RegisterMessage(CreateTableRequest.Split) - -CreateTableFromSnapshotRequest = _reflection.GeneratedProtocolMessageType( - "CreateTableFromSnapshotRequest", - (_message.Message,), - { - "DESCRIPTOR": _CREATETABLEFROMSNAPSHOTREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.Creat - eTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.Create - TableFromSnapshot] Note: This is a private alpha release of Cloud - Bigtable snapshots. This feature is not currently available to most - Cloud Bigtable customers. This feature might be changed in backward- - incompatible ways and is not recommended for production use. It is not - subject to any SLA or deprecation policy. - - Attributes: - parent: - Required. The unique name of the instance in which to create - the table. Values are of the form - ``projects/{project}/instances/{instance}``. - table_id: - Required. The name by which the new table should be referred - to within the parent instance, e.g., ``foobar`` rather than - ``{parent}/tables/foobar``. - source_snapshot: - Required. The unique name of the snapshot from which to - restore the table. The snapshot and the table must be in the - same instance. Values are of the form ``projects/{project}/ins - tances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableFromSnapshotRequest) - }, -) -_sym_db.RegisterMessage(CreateTableFromSnapshotRequest) - -DropRowRangeRequest = _reflection.GeneratedProtocolMessageType( - "DropRowRangeRequest", - (_message.Message,), - { - "DESCRIPTOR": _DROPROWRANGEREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.DropR - owRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] - - Attributes: - name: - Required. The unique name of the table on which to drop a - range of rows. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - target: - Delete all rows or by prefix. - row_key_prefix: - Delete all rows that start with this row key prefix. Prefix - cannot be zero length. - delete_all_data_from_table: - Delete all rows in the table. Setting this to false is a no- - op. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DropRowRangeRequest) - }, -) -_sym_db.RegisterMessage(DropRowRangeRequest) - -ListTablesRequest = _reflection.GeneratedProtocolMessageType( - "ListTablesRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTTABLESREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.ListT - ables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] - - Attributes: - parent: - Required. The unique name of the instance for which tables - should be listed. Values are of the form - ``projects/{project}/instances/{instance}``. - view: - The view to be applied to the returned tables’ fields. Only - NAME_ONLY view (default) and REPLICATION_VIEW are supported. - page_size: - Maximum number of results per page. A page_size of zero lets - the server choose the number of items to return. A page_size - which is strictly positive will return at most that many - items. A negative page_size will cause an error. Following - the first request, subsequent paginated calls are not required - to pass a page_size. If a page_size is set in subsequent - calls, it must match the page_size given in the first request. - page_token: - The value of ``next_page_token`` returned by a previous call. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListTablesRequest) - }, -) -_sym_db.RegisterMessage(ListTablesRequest) - -ListTablesResponse = _reflection.GeneratedProtocolMessageType( - "ListTablesResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTTABLESRESPONSE, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Response message for [google.bigtable.admin.v2.BigtableTableAdmin.List - Tables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] - - Attributes: - tables: - The tables present in the requested instance. - next_page_token: - Set if not all tables could be returned in a single response. - Pass this value to ``page_token`` in another request to get - the next page of results. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListTablesResponse) - }, -) -_sym_db.RegisterMessage(ListTablesResponse) - -GetTableRequest = _reflection.GeneratedProtocolMessageType( - "GetTableRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETTABLEREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.GetTa - ble][google.bigtable.admin.v2.BigtableTableAdmin.GetTable] - - Attributes: - name: - Required. The unique name of the requested table. Values are - of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - view: - The view to be applied to the returned table’s fields. - Defaults to ``SCHEMA_VIEW`` if unspecified. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetTableRequest) - }, -) -_sym_db.RegisterMessage(GetTableRequest) - -DeleteTableRequest = _reflection.GeneratedProtocolMessageType( - "DeleteTableRequest", - (_message.Message,), - { - "DESCRIPTOR": _DELETETABLEREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.Delet - eTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] - - Attributes: - name: - Required. The unique name of the table to be deleted. Values - are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteTableRequest) - }, -) -_sym_db.RegisterMessage(DeleteTableRequest) - -ModifyColumnFamiliesRequest = _reflection.GeneratedProtocolMessageType( - "ModifyColumnFamiliesRequest", - (_message.Message,), - { - "Modification": _reflection.GeneratedProtocolMessageType( - "Modification", - (_message.Message,), - { - "DESCRIPTOR": _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """A create, update, or delete of a particular column family. - - Attributes: - id: - The ID of the column family to be modified. - mod: - Column familiy modifications. - create: - Create a new column family with the specified schema, or fail - if one already exists with the given ID. - update: - Update an existing column family to the specified schema, or - fail if no column family exists with the given ID. - drop: - Drop (delete) the column family with the given ID, or fail if - no such family exists. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification) - }, - ), - "DESCRIPTOR": _MODIFYCOLUMNFAMILIESREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.Modif - yColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyCol - umnFamilies] - - Attributes: - name: - Required. The unique name of the table whose families should - be modified. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - modifications: - Required. Modifications to be atomically applied to the - specified table’s families. Entries are applied in order, - meaning that earlier modifications can be masked by later ones - (in the case of repeated updates to the same family, for - example). 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ModifyColumnFamiliesRequest) - }, -) -_sym_db.RegisterMessage(ModifyColumnFamiliesRequest) -_sym_db.RegisterMessage(ModifyColumnFamiliesRequest.Modification) - -GenerateConsistencyTokenRequest = _reflection.GeneratedProtocolMessageType( - "GenerateConsistencyTokenRequest", - (_message.Message,), - { - "DESCRIPTOR": _GENERATECONSISTENCYTOKENREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.Gener - ateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.Gener - ateConsistencyToken] - - Attributes: - name: - Required. The unique name of the Table for which to create a - consistency token. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GenerateConsistencyTokenRequest) - }, -) -_sym_db.RegisterMessage(GenerateConsistencyTokenRequest) - -GenerateConsistencyTokenResponse = _reflection.GeneratedProtocolMessageType( - "GenerateConsistencyTokenResponse", - (_message.Message,), - { - "DESCRIPTOR": _GENERATECONSISTENCYTOKENRESPONSE, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Response message for [google.bigtable.admin.v2.BigtableTableAdmin.Gene - rateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.Gene - rateConsistencyToken] - - Attributes: - consistency_token: - The generated consistency token. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GenerateConsistencyTokenResponse) - }, -) -_sym_db.RegisterMessage(GenerateConsistencyTokenResponse) - -CheckConsistencyRequest = _reflection.GeneratedProtocolMessageType( - "CheckConsistencyRequest", - (_message.Message,), - { - "DESCRIPTOR": _CHECKCONSISTENCYREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.Check - Consistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsiste - ncy] - - Attributes: - name: - Required. The unique name of the Table for which to check - replication consistency. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - consistency_token: - Required. The token created using GenerateConsistencyToken for - the Table. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CheckConsistencyRequest) - }, -) -_sym_db.RegisterMessage(CheckConsistencyRequest) - -CheckConsistencyResponse = _reflection.GeneratedProtocolMessageType( - "CheckConsistencyResponse", - (_message.Message,), - { - "DESCRIPTOR": _CHECKCONSISTENCYRESPONSE, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Response message for [google.bigtable.admin.v2.BigtableTableAdmin.Chec - kConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsist - ency] - - Attributes: - consistent: - True only if the token is consistent. A token is consistent if - replication has caught up with the restrictions specified in - the request. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CheckConsistencyResponse) - }, -) -_sym_db.RegisterMessage(CheckConsistencyResponse) - -SnapshotTableRequest = _reflection.GeneratedProtocolMessageType( - "SnapshotTableRequest", - (_message.Message,), - { - "DESCRIPTOR": _SNAPSHOTTABLEREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.Snaps - hotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable] - Note: This is a private alpha release of Cloud Bigtable snapshots. - This feature is not currently available to most Cloud Bigtable - customers. This feature might be changed in backward-incompatible ways - and is not recommended for production use. It is not subject to any - SLA or deprecation policy. - - Attributes: - name: - Required. The unique name of the table to have the snapshot - taken. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - cluster: - Required. The name of the cluster where the snapshot will be - created in. Values are of the form ``projects/{project}/instan - ces/{instance}/clusters/{cluster}``. - snapshot_id: - Required. The ID by which the new snapshot should be referred - to within the parent cluster, e.g., ``mysnapshot`` of the - form: ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` rather than ``projects/{ - project}/instances/{instance}/clusters/{cluster}/snapshots/mys - napshot``. - ttl: - The amount of time that the new snapshot can stay active after - it is created. Once ‘ttl’ expires, the snapshot will get - deleted. The maximum amount of time a snapshot can stay active - is 7 days. If ‘ttl’ is not specified, the default value of 24 - hours will be used. - description: - Description of the snapshot. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.SnapshotTableRequest) - }, -) -_sym_db.RegisterMessage(SnapshotTableRequest) - -GetSnapshotRequest = _reflection.GeneratedProtocolMessageType( - "GetSnapshotRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETSNAPSHOTREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.GetSn - apshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] - Note: This is a private alpha release of Cloud Bigtable snapshots. - This feature is not currently available to most Cloud Bigtable - customers. This feature might be changed in backward-incompatible ways - and is not recommended for production use. It is not subject to any - SLA or deprecation policy. - - Attributes: - name: - Required. The unique name of the requested snapshot. Values - are of the form ``projects/{project}/instances/{instance}/clus - ters/{cluster}/snapshots/{snapshot}``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetSnapshotRequest) - }, -) -_sym_db.RegisterMessage(GetSnapshotRequest) - -ListSnapshotsRequest = _reflection.GeneratedProtocolMessageType( - "ListSnapshotsRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTSNAPSHOTSREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.ListS - napshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] - Note: This is a private alpha release of Cloud Bigtable snapshots. 
- This feature is not currently available to most Cloud Bigtable - customers. This feature might be changed in backward-incompatible ways - and is not recommended for production use. It is not subject to any - SLA or deprecation policy. - - Attributes: - parent: - Required. The unique name of the cluster for which snapshots - should be listed. Values are of the form ``projects/{project}/ - instances/{instance}/clusters/{cluster}``. Use ``{cluster} = - '-'`` to list snapshots for all clusters in an instance, e.g., - ``projects/{project}/instances/{instance}/clusters/-``. - page_size: - The maximum number of snapshots to return per page. CURRENTLY - UNIMPLEMENTED AND IGNORED. - page_token: - The value of ``next_page_token`` returned by a previous call. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListSnapshotsRequest) - }, -) -_sym_db.RegisterMessage(ListSnapshotsRequest) - -ListSnapshotsResponse = _reflection.GeneratedProtocolMessageType( - "ListSnapshotsResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTSNAPSHOTSRESPONSE, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Response message for [google.bigtable.admin.v2.BigtableTableAdmin.List - Snapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] - Note: This is a private alpha release of Cloud Bigtable snapshots. - This feature is not currently available to most Cloud Bigtable - customers. This feature might be changed in backward-incompatible ways - and is not recommended for production use. It is not subject to any - SLA or deprecation policy. - - Attributes: - snapshots: - The snapshots present in the requested cluster. - next_page_token: - Set if not all snapshots could be returned in a single - response. Pass this value to ``page_token`` in another request - to get the next page of results. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListSnapshotsResponse) - }, -) -_sym_db.RegisterMessage(ListSnapshotsResponse) - -DeleteSnapshotRequest = _reflection.GeneratedProtocolMessageType( - "DeleteSnapshotRequest", - (_message.Message,), - { - "DESCRIPTOR": _DELETESNAPSHOTREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.Delet - eSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot] - Note: This is a private alpha release of Cloud Bigtable snapshots. - This feature is not currently available to most Cloud Bigtable - customers. This feature might be changed in backward-incompatible ways - and is not recommended for production use. It is not subject to any - SLA or deprecation policy. - - Attributes: - name: - Required. The unique name of the snapshot to be deleted. - Values are of the form ``projects/{project}/instances/{instanc - e}/clusters/{cluster}/snapshots/{snapshot}``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteSnapshotRequest) - }, -) -_sym_db.RegisterMessage(DeleteSnapshotRequest) - -SnapshotTableMetadata = _reflection.GeneratedProtocolMessageType( - "SnapshotTableMetadata", - (_message.Message,), - { - "DESCRIPTOR": _SNAPSHOTTABLEMETADATA, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """The metadata for the Operation returned by SnapshotTable. Note: This - is a private alpha release of Cloud Bigtable snapshots. This feature - is not currently available to most Cloud Bigtable customers. 
This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or - deprecation policy. - - Attributes: - original_request: - The request that prompted the initiation of this SnapshotTable - operation. - request_time: - The time at which the original request was received. - finish_time: - The time at which the operation failed or was completed - successfully. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.SnapshotTableMetadata) - }, -) -_sym_db.RegisterMessage(SnapshotTableMetadata) - -CreateTableFromSnapshotMetadata = _reflection.GeneratedProtocolMessageType( - "CreateTableFromSnapshotMetadata", - (_message.Message,), - { - "DESCRIPTOR": _CREATETABLEFROMSNAPSHOTMETADATA, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """The metadata for the Operation returned by CreateTableFromSnapshot. - Note: This is a private alpha release of Cloud Bigtable snapshots. - This feature is not currently available to most Cloud Bigtable - customers. This feature might be changed in backward-incompatible ways - and is not recommended for production use. It is not subject to any - SLA or deprecation policy. - - Attributes: - original_request: - The request that prompted the initiation of this - CreateTableFromSnapshot operation. - request_time: - The time at which the original request was received. - finish_time: - The time at which the operation failed or was completed - successfully. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableFromSnapshotMetadata) - }, -) -_sym_db.RegisterMessage(CreateTableFromSnapshotMetadata) - -CreateBackupRequest = _reflection.GeneratedProtocolMessageType( - "CreateBackupRequest", - (_message.Message,), - { - "DESCRIPTOR": _CREATEBACKUPREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """The request for [CreateBackup][google.bigtable.admin.v2.BigtableTableA - dmin.CreateBackup]. - - Attributes: - parent: - Required. This must be one of the clusters in the instance in - which this table is located. The backup will be stored in this - cluster. Values are of the form ``projects/{project}/instances - /{instance}/clusters/{cluster}``. - backup_id: - Required. The id of the backup to be created. The - ``backup_id`` along with the parent ``parent`` are combined as - {parent}/backups/{backup_id} to create the full backup name, - of the form: ``projects/{project}/instances/{instance}/cluster - s/{cluster}/backups/{backup_id}``. This string must be between - 1 and 50 characters in length and match the regex [_a- - zA-Z0-9][-_.a-zA-Z0-9]*. - backup: - Required. The backup to create. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateBackupRequest) - }, -) -_sym_db.RegisterMessage(CreateBackupRequest) - -CreateBackupMetadata = _reflection.GeneratedProtocolMessageType( - "CreateBackupMetadata", - (_message.Message,), - { - "DESCRIPTOR": _CREATEBACKUPMETADATA, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Metadata type for the operation returned by [CreateBackup][google.bigt - able.admin.v2.BigtableTableAdmin.CreateBackup]. - - Attributes: - name: - The name of the backup being created. - source_table: - The name of the table the backup is created from. - start_time: - The time at which this operation started. 
- end_time: - If set, the time at which this operation finished or was - cancelled. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateBackupMetadata) - }, -) -_sym_db.RegisterMessage(CreateBackupMetadata) - -GetBackupRequest = _reflection.GeneratedProtocolMessageType( - "GetBackupRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETBACKUPREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """The request for - [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. - - Attributes: - name: - Required. Name of the backup. Values are of the form ``project - s/{project}/instances/{instance}/clusters/{cluster}/backups/{b - ackup}``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetBackupRequest) - }, -) -_sym_db.RegisterMessage(GetBackupRequest) - -UpdateBackupRequest = _reflection.GeneratedProtocolMessageType( - "UpdateBackupRequest", - (_message.Message,), - { - "DESCRIPTOR": _UPDATEBACKUPREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """The request for [UpdateBackup][google.bigtable.admin.v2.BigtableTableA - dmin.UpdateBackup]. - - Attributes: - backup: - Required. The backup to update. ``backup.name``, and the - fields to be updated as specified by ``update_mask`` are - required. Other fields are ignored. Update is only supported - for the following fields: \* ``backup.expire_time``. - update_mask: - Required. A mask specifying which fields (e.g. - ``expire_time``) in the Backup resource should be updated. - This mask is relative to the Backup resource, not to the - request message. The field mask must always be specified; this - prevents any future fields from being erased accidentally by - clients that do not know about them. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.UpdateBackupRequest) - }, -) -_sym_db.RegisterMessage(UpdateBackupRequest) - -DeleteBackupRequest = _reflection.GeneratedProtocolMessageType( - "DeleteBackupRequest", - (_message.Message,), - { - "DESCRIPTOR": _DELETEBACKUPREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """The request for [DeleteBackup][google.bigtable.admin.v2.BigtableTableA - dmin.DeleteBackup]. - - Attributes: - name: - Required. Name of the backup to delete. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/b - ackups/{backup}``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteBackupRequest) - }, -) -_sym_db.RegisterMessage(DeleteBackupRequest) - -ListBackupsRequest = _reflection.GeneratedProtocolMessageType( - "ListBackupsRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTBACKUPSREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """The request for [ListBackups][google.bigtable.admin.v2.BigtableTableAd - min.ListBackups]. - - Attributes: - parent: - Required. The cluster to list backups from. Values are of the - form ``projects/{project}/instances/{instance}/clusters/{clust - er}``. Use ``{cluster} = '-'`` to list backups for all - clusters in an instance, e.g., - ``projects/{project}/instances/{instance}/clusters/-``. - filter: - A filter expression that filters backups listed in the - response. The expression must specify the field name, a - comparison operator, and the value that you want to use for - filtering. The value must be a string, a number, or a boolean. 
- The comparison operator must be <, >, <=, >=, !=, =, or :. - Colon ‘:’ represents a HAS operator which is roughly - synonymous with equality. Filter rules are case insensitive. - The fields eligible for filtering are: \* ``name`` \* - ``source_table`` \* ``state`` \* ``start_time`` (and values - are of the format YYYY-MM-DDTHH:MM:SSZ) \* ``end_time`` (and - values are of the format YYYY-MM-DDTHH:MM:SSZ) \* - ``expire_time`` (and values are of the format YYYY-MM- - DDTHH:MM:SSZ) \* ``size_bytes`` To filter on multiple - expressions, provide each separate expression within - parentheses. By default, each expression is an AND expression. - However, you can include AND, OR, and NOT expressions - explicitly. Some examples of using filters are: - - ``name:"exact"`` –> The backup’s name is the string “exact”. - - ``name:howl`` –> The backup’s name contains the string “howl”. - - ``source_table:prod`` –> The source_table’s name contains - the string “prod”. - ``state:CREATING`` –> The backup is - pending creation. - ``state:READY`` –> The backup is fully - created and ready for use. - ``(name:howl) AND (start_time < - \"2018-03-28T14:50:00Z\")`` –> The backup name contains the - string “howl” and start_time of the backup is before - 2018-03-28T14:50:00Z. - ``size_bytes > 10000000000`` –> The - backup’s size is greater than 10GB - order_by: - An expression for specifying the sort order of the results of - the request. The string value should specify one or more - fields in [Backup][google.bigtable.admin.v2.Backup]. The full - syntax is described at https://aip.dev/132#ordering. Fields - supported are: \* name \* source_table \* expire_time \* - start_time \* end_time \* size_bytes \* state For example, - “start_time”. The default sorting order is ascending. To - specify descending order for the field, a suffix " desc" - should be appended to the field name. For example, “start_time - desc”. Redundant space characters in the syntax are - insignificant. If order_by is empty, results will be sorted by - ``start_time`` in descending order starting from the most - recently created backup. - page_size: - Number of backups to be returned in the response. If 0 or - less, defaults to the server’s maximum allowed page size. - page_token: - If non-empty, ``page_token`` should contain a [next_page_token - ][google.bigtable.admin.v2.ListBackupsResponse.next_page_token - ] from a previous [ListBackupsResponse][google.bigtable.admin. - v2.ListBackupsResponse] to the same ``parent`` and with the - same ``filter``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListBackupsRequest) - }, -) -_sym_db.RegisterMessage(ListBackupsRequest) - -ListBackupsResponse = _reflection.GeneratedProtocolMessageType( - "ListBackupsResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTBACKUPSRESPONSE, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """The response for [ListBackups][google.bigtable.admin.v2.BigtableTableA - dmin.ListBackups]. - - Attributes: - backups: - The list of matching backups. - next_page_token: - \ ``next_page_token`` can be sent in a subsequent [ListBackups - ][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups] - call to fetch more of the matching backups.
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListBackupsResponse) - }, -) -_sym_db.RegisterMessage(ListBackupsResponse) - -RestoreTableRequest = _reflection.GeneratedProtocolMessageType( - "RestoreTableRequest", - (_message.Message,), - { - "DESCRIPTOR": _RESTORETABLEREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """The request for [RestoreTable][google.bigtable.admin.v2.BigtableTableA - dmin.RestoreTable]. - - Attributes: - parent: - Required. The name of the instance in which to create the - restored table. This instance must be the parent of the source - backup. Values are of the form - ``projects//instances/``. - table_id: - Required. The id of the table to create and restore to. This - table must not already exist. The ``table_id`` appended to - ``parent`` forms the full table name of the form - ``projects//instances//tables/``. - source: - Required. The source from which to restore. - backup: - Name of the backup from which to restore. Values are of the - form ``projects//instances//clusters//backups/``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.RestoreTableRequest) - }, -) -_sym_db.RegisterMessage(RestoreTableRequest) - -RestoreTableMetadata = _reflection.GeneratedProtocolMessageType( - "RestoreTableMetadata", - (_message.Message,), - { - "DESCRIPTOR": _RESTORETABLEMETADATA, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Metadata type for the long-running operation returned by [RestoreTable - ][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. - - Attributes: - name: - Name of the table being created and restored to. - source_type: - The type of the restore source. - source_info: - Information about the source used to restore the table, as - specified by ``source`` in [RestoreTableRequest][google.bigtab - le.admin.v2.RestoreTableRequest]. - optimize_table_operation_name: - If exists, the name of the long-running operation that will be - used to track the post-restore optimization process to - optimize the performance of the restored table. The metadata - type of the long-running operation is - [OptimizeRestoreTableMetadata][]. The response type is - [Empty][google.protobuf.Empty]. This long-running operation - may be automatically created by the system if applicable after - the RestoreTable long-running operation completes - successfully. This operation may not be created if the table - is already optimized or the restore was not successful. - progress: - The progress of the [RestoreTable][google.bigtable.admin.v2.Bi - gtableTableAdmin.RestoreTable] operation. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.RestoreTableMetadata) - }, -) -_sym_db.RegisterMessage(RestoreTableMetadata) - -OptimizeRestoredTableMetadata = _reflection.GeneratedProtocolMessageType( - "OptimizeRestoredTableMetadata", - (_message.Message,), - { - "DESCRIPTOR": _OPTIMIZERESTOREDTABLEMETADATA, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Metadata type for the long-running operation used to track the - progress of optimizations performed on a newly restored table. This - long-running operation is automatically created by the system after - the successful completion of a table restore, and cannot be cancelled. - - Attributes: - name: - Name of the restored table being optimized. - progress: - The progress of the post-restore optimizations. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.OptimizeRestoredTableMetadata) - }, -) -_sym_db.RegisterMessage(OptimizeRestoredTableMetadata) - - -DESCRIPTOR._options = None -_CREATETABLEREQUEST.fields_by_name["parent"]._options = None -_CREATETABLEREQUEST.fields_by_name["table_id"]._options = None -_CREATETABLEREQUEST.fields_by_name["table"]._options = None -_CREATETABLEFROMSNAPSHOTREQUEST.fields_by_name["parent"]._options = None -_CREATETABLEFROMSNAPSHOTREQUEST.fields_by_name["table_id"]._options = None -_CREATETABLEFROMSNAPSHOTREQUEST.fields_by_name["source_snapshot"]._options = None -_DROPROWRANGEREQUEST.fields_by_name["name"]._options = None -_LISTTABLESREQUEST.fields_by_name["parent"]._options = None -_GETTABLEREQUEST.fields_by_name["name"]._options = None -_DELETETABLEREQUEST.fields_by_name["name"]._options = None -_MODIFYCOLUMNFAMILIESREQUEST.fields_by_name["name"]._options = None -_MODIFYCOLUMNFAMILIESREQUEST.fields_by_name["modifications"]._options = None -_GENERATECONSISTENCYTOKENREQUEST.fields_by_name["name"]._options = None -_CHECKCONSISTENCYREQUEST.fields_by_name["name"]._options = None -_CHECKCONSISTENCYREQUEST.fields_by_name["consistency_token"]._options = None -_SNAPSHOTTABLEREQUEST.fields_by_name["name"]._options = None -_SNAPSHOTTABLEREQUEST.fields_by_name["cluster"]._options = None -_SNAPSHOTTABLEREQUEST.fields_by_name["snapshot_id"]._options = None -_GETSNAPSHOTREQUEST.fields_by_name["name"]._options = None -_LISTSNAPSHOTSREQUEST.fields_by_name["parent"]._options = None -_DELETESNAPSHOTREQUEST.fields_by_name["name"]._options = None -_CREATEBACKUPREQUEST.fields_by_name["parent"]._options = None -_CREATEBACKUPREQUEST.fields_by_name["backup_id"]._options = None -_CREATEBACKUPREQUEST.fields_by_name["backup"]._options = None -_GETBACKUPREQUEST.fields_by_name["name"]._options = None -_UPDATEBACKUPREQUEST.fields_by_name["backup"]._options = None -_UPDATEBACKUPREQUEST.fields_by_name["update_mask"]._options = None -_DELETEBACKUPREQUEST.fields_by_name["name"]._options = None -_LISTBACKUPSREQUEST.fields_by_name["parent"]._options = None - -_BIGTABLETABLEADMIN = _descriptor.ServiceDescriptor( - name="BigtableTableAdmin", - full_name="google.bigtable.admin.v2.BigtableTableAdmin", - file=DESCRIPTOR, - index=0, - serialized_options=b"\312A\034bigtableadmin.googleapis.com\322A\273\002https://www.googleapis.com/auth/bigtable.admin,https://www.googleapis.com/auth/bigtable.admin.table,https://www.googleapis.com/auth/cloud-bigtable.admin,https://www.googleapis.com/auth/cloud-bigtable.admin.table,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-only", - create_key=_descriptor._internal_create_key, - serialized_start=4604, - serialized_end=9284, - methods=[ - _descriptor.MethodDescriptor( - name="CreateTable", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.CreateTable", - index=0, - containing_service=None, - input_type=_CREATETABLEREQUEST, - output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._TABLE, - serialized_options=b'\202\323\344\223\002/"*/v2/{parent=projects/*/instances/*}/tables:\001*\332A\025parent,table_id,table', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="CreateTableFromSnapshot", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot", - index=1, - containing_service=None, - input_type=_CREATETABLEFROMSNAPSHOTREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - 
serialized_options=b'\202\323\344\223\002B"=/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot:\001*\332A\037parent,table_id,source_snapshot\312A(\n\005Table\022\037CreateTableFromSnapshotMetadata', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="ListTables", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.ListTables", - index=2, - containing_service=None, - input_type=_LISTTABLESREQUEST, - output_type=_LISTTABLESRESPONSE, - serialized_options=b"\202\323\344\223\002,\022*/v2/{parent=projects/*/instances/*}/tables\332A\006parent", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="GetTable", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.GetTable", - index=3, - containing_service=None, - input_type=_GETTABLEREQUEST, - output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._TABLE, - serialized_options=b"\202\323\344\223\002,\022*/v2/{name=projects/*/instances/*/tables/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="DeleteTable", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable", - index=4, - containing_service=None, - input_type=_DELETETABLEREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=b"\202\323\344\223\002,**/v2/{name=projects/*/instances/*/tables/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="ModifyColumnFamilies", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies", - index=5, - containing_service=None, - input_type=_MODIFYCOLUMNFAMILIESREQUEST, - output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._TABLE, - serialized_options=b'\202\323\344\223\002D"?/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies:\001*\332A\022name,modifications', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="DropRowRange", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange", - index=6, - containing_service=None, - input_type=_DROPROWRANGEREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=b'\202\323\344\223\002<"7/v2/{name=projects/*/instances/*/tables/*}:dropRowRange:\001*', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="GenerateConsistencyToken", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken", - index=7, - containing_service=None, - input_type=_GENERATECONSISTENCYTOKENREQUEST, - output_type=_GENERATECONSISTENCYTOKENRESPONSE, - serialized_options=b'\202\323\344\223\002H"C/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken:\001*\332A\004name', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="CheckConsistency", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency", - index=8, - containing_service=None, - input_type=_CHECKCONSISTENCYREQUEST, - output_type=_CHECKCONSISTENCYRESPONSE, - serialized_options=b'\202\323\344\223\002@";/v2/{name=projects/*/instances/*/tables/*}:checkConsistency:\001*\332A\026name,consistency_token', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="SnapshotTable", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable", - index=9, - containing_service=None, - input_type=_SNAPSHOTTABLEREQUEST, - 
output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b'\202\323\344\223\0028"3/v2/{name=projects/*/instances/*/tables/*}:snapshot:\001*\332A$name,cluster,snapshot_id,description\312A!\n\010Snapshot\022\025SnapshotTableMetadata', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="GetSnapshot", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot", - index=10, - containing_service=None, - input_type=_GETSNAPSHOTREQUEST, - output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._SNAPSHOT, - serialized_options=b"\202\323\344\223\002:\0228/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="ListSnapshots", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots", - index=11, - containing_service=None, - input_type=_LISTSNAPSHOTSREQUEST, - output_type=_LISTSNAPSHOTSRESPONSE, - serialized_options=b"\202\323\344\223\002:\0228/v2/{parent=projects/*/instances/*/clusters/*}/snapshots\332A\006parent", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="DeleteSnapshot", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot", - index=12, - containing_service=None, - input_type=_DELETESNAPSHOTREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=b"\202\323\344\223\002:*8/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="CreateBackup", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup", - index=13, - containing_service=None, - input_type=_CREATEBACKUPREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b'\202\323\344\223\002@"6/v2/{parent=projects/*/instances/*/clusters/*}/backups:\006backup\312A\036\n\006Backup\022\024CreateBackupMetadata\332A\027parent,backup_id,backup', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="GetBackup", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.GetBackup", - index=14, - containing_service=None, - input_type=_GETBACKUPREQUEST, - output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._BACKUP, - serialized_options=b"\202\323\344\223\0028\0226/v2/{name=projects/*/instances/*/clusters/*/backups/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="UpdateBackup", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup", - index=15, - containing_service=None, - input_type=_UPDATEBACKUPREQUEST, - output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._BACKUP, - serialized_options=b"\202\323\344\223\002G2=/v2/{backup.name=projects/*/instances/*/clusters/*/backups/*}:\006backup\332A\022backup,update_mask", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="DeleteBackup", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup", - index=16, - containing_service=None, - input_type=_DELETEBACKUPREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=b"\202\323\344\223\0028*6/v2/{name=projects/*/instances/*/clusters/*/backups/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - 
name="ListBackups", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.ListBackups", - index=17, - containing_service=None, - input_type=_LISTBACKUPSREQUEST, - output_type=_LISTBACKUPSRESPONSE, - serialized_options=b"\202\323\344\223\0028\0226/v2/{parent=projects/*/instances/*/clusters/*}/backups\332A\006parent", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="RestoreTable", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable", - index=18, - containing_service=None, - input_type=_RESTORETABLEREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b'\202\323\344\223\0027"2/v2/{parent=projects/*/instances/*}/tables:restore:\001*\312A\035\n\005Table\022\024RestoreTableMetadata', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="GetIamPolicy", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.GetIamPolicy", - index=19, - containing_service=None, - input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._GETIAMPOLICYREQUEST, - output_type=google_dot_iam_dot_v1_dot_policy__pb2._POLICY, - serialized_options=b'\202\323\344\223\002@";/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy:\001*\332A\010resource', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="SetIamPolicy", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.SetIamPolicy", - index=20, - containing_service=None, - input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._SETIAMPOLICYREQUEST, - output_type=google_dot_iam_dot_v1_dot_policy__pb2._POLICY, - serialized_options=b'\202\323\344\223\002\216\001";/v2/{resource=projects/*/instances/*/tables/*}:setIamPolicy:\001*ZL"G/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:setIamPolicy:\001*\332A\017resource,policy', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="TestIamPermissions", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.TestIamPermissions", - index=21, - containing_service=None, - input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._TESTIAMPERMISSIONSREQUEST, - output_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._TESTIAMPERMISSIONSRESPONSE, - serialized_options=b'\202\323\344\223\002\232\001"A/v2/{resource=projects/*/instances/*/tables/*}:testIamPermissions:\001*ZR"M/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:testIamPermissions:\001*\332A\024resource,permissions', - create_key=_descriptor._internal_create_key, - ), - ], -) -_sym_db.RegisterServiceDescriptor(_BIGTABLETABLEADMIN) - -DESCRIPTOR.services_by_name["BigtableTableAdmin"] = _BIGTABLETABLEADMIN - -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py b/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py deleted file mode 100644 index 949de429e..000000000 --- a/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py +++ /dev/null @@ -1,1083 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
-"""Client and server classes corresponding to protobuf-defined services.""" -import grpc - -from google.cloud.bigtable_admin_v2.proto import ( - bigtable_table_admin_pb2 as google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2, -) -from google.cloud.bigtable_admin_v2.proto import ( - table_pb2 as google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2, -) -from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2 -from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2 -from google.longrunning import ( - operations_pb2 as google_dot_longrunning_dot_operations__pb2, -) -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 - - -class BigtableTableAdminStub(object): - """Service for creating, configuring, and deleting Cloud Bigtable tables. - - - Provides access to the table schemas only, not the data stored within - the tables. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. - """ - self.CreateTable = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.FromString, - ) - self.CreateTableFromSnapshot = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTableFromSnapshot", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableFromSnapshotRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.ListTables = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/ListTables", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesResponse.FromString, - ) - self.GetTable = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/GetTable", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetTableRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.FromString, - ) - self.DeleteTable = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteTableRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - self.ModifyColumnFamilies = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ModifyColumnFamiliesRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.FromString, - ) - self.DropRowRange = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DropRowRangeRequest.SerializeToString, - 
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - self.GenerateConsistencyToken = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/GenerateConsistencyToken", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenResponse.FromString, - ) - self.CheckConsistency = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/CheckConsistency", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyResponse.FromString, - ) - self.SnapshotTable = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/SnapshotTable", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.SnapshotTableRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.GetSnapshot = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/GetSnapshot", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetSnapshotRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Snapshot.FromString, - ) - self.ListSnapshots = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/ListSnapshots", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsResponse.FromString, - ) - self.DeleteSnapshot = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSnapshot", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteSnapshotRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - self.CreateBackup = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/CreateBackup", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateBackupRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.GetBackup = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/GetBackup", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetBackupRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Backup.FromString, - ) - self.UpdateBackup = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateBackup", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.UpdateBackupRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Backup.FromString, - ) - self.DeleteBackup = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteBackup", - 
request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteBackupRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - self.ListBackups = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/ListBackups", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListBackupsRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListBackupsResponse.FromString, - ) - self.RestoreTable = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/RestoreTable", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.RestoreTableRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.GetIamPolicy = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/GetIamPolicy", - request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.SerializeToString, - response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, - ) - self.SetIamPolicy = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/SetIamPolicy", - request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.SerializeToString, - response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, - ) - self.TestIamPermissions = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/TestIamPermissions", - request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.SerializeToString, - response_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString, - ) - - -class BigtableTableAdminServicer(object): - """Service for creating, configuring, and deleting Cloud Bigtable tables. - - - Provides access to the table schemas only, not the data stored within - the tables. - """ - - def CreateTable(self, request, context): - """Creates a new table in the specified instance. - The table can be created with a full set of initial column families, - specified in the request. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def CreateTableFromSnapshot(self, request, context): - """Creates a new table from the specified snapshot. The target table must - not exist. The snapshot and the table must be in the same instance. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. 
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListTables(self, request, context): - """Lists all tables served from a specified instance.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetTable(self, request, context): - """Gets metadata information about the specified table.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeleteTable(self, request, context): - """Permanently deletes a specified table and all of its data.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ModifyColumnFamilies(self, request, context): - """Performs a series of column family modifications on the specified table. - Either all or none of the modifications will occur before this method - returns, but data requests received prior to that point may see a table - where only some modifications have taken effect. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DropRowRange(self, request, context): - """Permanently drop/delete a row range from a specified table. The request can - specify whether to delete all rows in a table, or only those that match a - particular prefix. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GenerateConsistencyToken(self, request, context): - """Generates a consistency token for a Table, which can be used in - CheckConsistency to check whether mutations to the table that finished - before this call started have been replicated. The tokens will be available - for 90 days. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def CheckConsistency(self, request, context): - """Checks replication consistency based on a consistency token, that is, if - replication has caught up based on the conditions specified in the token - and the check request. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def SnapshotTable(self, request, context): - """Creates a new snapshot in the specified cluster from the specified - source table. The cluster and the table must be in the same instance. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetSnapshot(self, request, context): - """Gets metadata information about the specified snapshot. - - Note: This is a private alpha release of Cloud Bigtable snapshots. 
This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListSnapshots(self, request, context): - """Lists all snapshots associated with the specified cluster. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeleteSnapshot(self, request, context): - """Permanently deletes the specified snapshot. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def CreateBackup(self, request, context): - """Starts creating a new Cloud Bigtable Backup. The returned backup - [long-running operation][google.longrunning.Operation] can be used to - track creation of the backup. The - [metadata][google.longrunning.Operation.metadata] field type is - [CreateBackupMetadata][google.bigtable.admin.v2.CreateBackupMetadata]. The - [response][google.longrunning.Operation.response] field type is - [Backup][google.bigtable.admin.v2.Backup], if successful. Cancelling the - returned operation will stop the creation and delete the backup. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetBackup(self, request, context): - """Gets metadata on a pending or completed Cloud Bigtable Backup.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def UpdateBackup(self, request, context): - """Updates a pending or completed Cloud Bigtable Backup.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeleteBackup(self, request, context): - """Deletes a pending or completed Cloud Bigtable backup.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListBackups(self, request, context): - """Lists Cloud Bigtable backups. Returns both completed and pending - backups. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def RestoreTable(self, request, context): - """Create a new table by restoring from a completed backup. The new table - must be in the same instance as the instance containing the backup. 
The - returned table [long-running operation][google.longrunning.Operation] can - be used to track the progress of the operation, and to cancel it. The - [metadata][google.longrunning.Operation.metadata] field type is - [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. The - [response][google.longrunning.Operation.response] type is - [Table][google.bigtable.admin.v2.Table], if successful. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetIamPolicy(self, request, context): - """Gets the access control policy for a resource. - Returns an empty policy if the resource exists but does not have a policy - set. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def SetIamPolicy(self, request, context): - """Sets the access control policy on a Table or Backup resource. - Replaces any existing policy. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def TestIamPermissions(self, request, context): - """Returns permissions that the caller has on the specified table resource.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - -def add_BigtableTableAdminServicer_to_server(servicer, server): - rpc_method_handlers = { - "CreateTable": grpc.unary_unary_rpc_method_handler( - servicer.CreateTable, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.SerializeToString, - ), - "CreateTableFromSnapshot": grpc.unary_unary_rpc_method_handler( - servicer.CreateTableFromSnapshot, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableFromSnapshotRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "ListTables": grpc.unary_unary_rpc_method_handler( - servicer.ListTables, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesResponse.SerializeToString, - ), - "GetTable": grpc.unary_unary_rpc_method_handler( - servicer.GetTable, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetTableRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.SerializeToString, - ), - "DeleteTable": grpc.unary_unary_rpc_method_handler( - servicer.DeleteTable, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteTableRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - "ModifyColumnFamilies": grpc.unary_unary_rpc_method_handler( - servicer.ModifyColumnFamilies, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ModifyColumnFamiliesRequest.FromString, - 
response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.SerializeToString, - ), - "DropRowRange": grpc.unary_unary_rpc_method_handler( - servicer.DropRowRange, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DropRowRangeRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - "GenerateConsistencyToken": grpc.unary_unary_rpc_method_handler( - servicer.GenerateConsistencyToken, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenResponse.SerializeToString, - ), - "CheckConsistency": grpc.unary_unary_rpc_method_handler( - servicer.CheckConsistency, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyResponse.SerializeToString, - ), - "SnapshotTable": grpc.unary_unary_rpc_method_handler( - servicer.SnapshotTable, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.SnapshotTableRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "GetSnapshot": grpc.unary_unary_rpc_method_handler( - servicer.GetSnapshot, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetSnapshotRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Snapshot.SerializeToString, - ), - "ListSnapshots": grpc.unary_unary_rpc_method_handler( - servicer.ListSnapshots, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsResponse.SerializeToString, - ), - "DeleteSnapshot": grpc.unary_unary_rpc_method_handler( - servicer.DeleteSnapshot, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteSnapshotRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - "CreateBackup": grpc.unary_unary_rpc_method_handler( - servicer.CreateBackup, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateBackupRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "GetBackup": grpc.unary_unary_rpc_method_handler( - servicer.GetBackup, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetBackupRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Backup.SerializeToString, - ), - "UpdateBackup": grpc.unary_unary_rpc_method_handler( - servicer.UpdateBackup, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.UpdateBackupRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Backup.SerializeToString, - ), - 
"DeleteBackup": grpc.unary_unary_rpc_method_handler( - servicer.DeleteBackup, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteBackupRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - "ListBackups": grpc.unary_unary_rpc_method_handler( - servicer.ListBackups, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListBackupsRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListBackupsResponse.SerializeToString, - ), - "RestoreTable": grpc.unary_unary_rpc_method_handler( - servicer.RestoreTable, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.RestoreTableRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "GetIamPolicy": grpc.unary_unary_rpc_method_handler( - servicer.GetIamPolicy, - request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.FromString, - response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString, - ), - "SetIamPolicy": grpc.unary_unary_rpc_method_handler( - servicer.SetIamPolicy, - request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.FromString, - response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString, - ), - "TestIamPermissions": grpc.unary_unary_rpc_method_handler( - servicer.TestIamPermissions, - request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.FromString, - response_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - "google.bigtable.admin.v2.BigtableTableAdmin", rpc_method_handlers - ) - server.add_generic_rpc_handlers((generic_handler,)) - - -# This class is part of an EXPERIMENTAL API. -class BigtableTableAdmin(object): - """Service for creating, configuring, and deleting Cloud Bigtable tables. - - - Provides access to the table schemas only, not the data stored within - the tables. 
- """ - - @staticmethod - def CreateTable( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def CreateTableFromSnapshot( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTableFromSnapshot", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableFromSnapshotRequest.SerializeToString, - google_dot_longrunning_dot_operations__pb2.Operation.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def ListTables( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/ListTables", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def GetTable( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/GetTable", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetTableRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def DeleteTable( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteTableRequest.SerializeToString, - google_dot_protobuf_dot_empty__pb2.Empty.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def ModifyColumnFamilies( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - 
"/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ModifyColumnFamiliesRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def DropRowRange( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DropRowRangeRequest.SerializeToString, - google_dot_protobuf_dot_empty__pb2.Empty.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def GenerateConsistencyToken( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/GenerateConsistencyToken", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def CheckConsistency( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/CheckConsistency", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def SnapshotTable( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/SnapshotTable", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.SnapshotTableRequest.SerializeToString, - google_dot_longrunning_dot_operations__pb2.Operation.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def GetSnapshot( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/GetSnapshot", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetSnapshotRequest.SerializeToString, 
- google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Snapshot.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def ListSnapshots( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/ListSnapshots", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def DeleteSnapshot( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSnapshot", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteSnapshotRequest.SerializeToString, - google_dot_protobuf_dot_empty__pb2.Empty.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def CreateBackup( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/CreateBackup", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateBackupRequest.SerializeToString, - google_dot_longrunning_dot_operations__pb2.Operation.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def GetBackup( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/GetBackup", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetBackupRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Backup.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def UpdateBackup( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateBackup", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.UpdateBackupRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Backup.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def DeleteBackup( - request, - target, - options=(), - 
channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteBackup", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteBackupRequest.SerializeToString, - google_dot_protobuf_dot_empty__pb2.Empty.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def ListBackups( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/ListBackups", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListBackupsRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListBackupsResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def RestoreTable( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/RestoreTable", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.RestoreTableRequest.SerializeToString, - google_dot_longrunning_dot_operations__pb2.Operation.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def GetIamPolicy( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/GetIamPolicy", - google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.SerializeToString, - google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def SetIamPolicy( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/SetIamPolicy", - google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.SerializeToString, - google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def TestIamPermissions( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/TestIamPermissions", - google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.SerializeToString, - 
google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) diff --git a/google/cloud/bigtable_admin_v2/proto/bigtable_table_data.proto b/google/cloud/bigtable_admin_v2/proto/bigtable_table_data.proto deleted file mode 100644 index e4efb74f5..000000000 --- a/google/cloud/bigtable_admin_v2/proto/bigtable_table_data.proto +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2017 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.table.v1; - -import "google/longrunning/operations.proto"; -import "google/protobuf/duration.proto"; - -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/table/v1;table"; -option java_multiple_files = true; -option java_outer_classname = "BigtableTableDataProto"; -option java_package = "com.google.bigtable.admin.table.v1"; - - -// A collection of user data indexed by row, column, and timestamp. -// Each table is served using the resources of its parent cluster. -message Table { - enum TimestampGranularity { - MILLIS = 0; - } - - // A unique identifier of the form - // <cluster_name>/tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]* - string name = 1; - - // If this Table is in the process of being created, the Operation used to - // track its progress. As long as this operation is present, the Table will - // not accept any Table Admin or Read/Write requests. - google.longrunning.Operation current_operation = 2; - - // The column families configured for this table, mapped by column family id. - map<string, ColumnFamily> column_families = 3; - - // The granularity (e.g. MILLIS, MICROS) at which timestamps are stored in - // this table. Timestamps not matching the granularity will be rejected. - // Cannot be changed once the table is created. - TimestampGranularity granularity = 4; -} - -// A set of columns within a table which share a common configuration. -message ColumnFamily { - // A unique identifier of the form <table_name>/columnFamilies/[-_.a-zA-Z0-9]+ - // The last segment is the same as the "name" field in - // google.bigtable.v1.Family.
- string name = 1; - - // Garbage collection expression specified by the following grammar: - // GC = EXPR - // | "" ; - // EXPR = EXPR, "||", EXPR (* lowest precedence *) - // | EXPR, "&&", EXPR - // | "(", EXPR, ")" (* highest precedence *) - // | PROP ; - // PROP = "version() >", NUM32 - // | "age() >", NUM64, [ UNIT ] ; - // NUM32 = non-zero-digit { digit } ; (* # NUM32 <= 2^32 - 1 *) - // NUM64 = non-zero-digit { digit } ; (* # NUM64 <= 2^63 - 1 *) - // UNIT = "d" | "h" | "m" (* d=days, h=hours, m=minutes, else micros *) - // GC expressions can be up to 500 characters in length - // - // The different types of PROP are defined as follows: - // version() - cell index, counting from most recent and starting at 1 - // age() - age of the cell (current time minus cell timestamp) - // - // Example: "version() > 3 || (age() > 3d && version() > 1)" - // drop cells beyond the most recent three, and drop cells older than three - // days unless they're the most recent cell in the row/column - // - // Garbage collection executes opportunistically in the background, and so - // it's possible for reads to return a cell even if it matches the active GC - // expression for its family. - string gc_expression = 2; - - // Garbage collection rule specified as a protobuf. - // Supersedes `gc_expression`. - // Must serialize to at most 500 bytes. - // - // NOTE: Garbage collection executes opportunistically in the background, and - // so it's possible for reads to return a cell even if it matches the active - // GC expression for its family. - GcRule gc_rule = 3; -} - -// Rule for determining which cells to delete during garbage collection. -message GcRule { - // A GcRule which deletes cells matching all of the given rules. - message Intersection { - // Only delete cells which would be deleted by every element of `rules`. - repeated GcRule rules = 1; - } - - // A GcRule which deletes cells matching any of the given rules. - message Union { - // Delete cells which would be deleted by any element of `rules`. - repeated GcRule rules = 1; - } - - oneof rule { - // Delete all cells in a column except the most recent N. - int32 max_num_versions = 1; - - // Delete cells in a column older than the given age. - // Values must be at least one millisecond, and will be truncated to - // microsecond granularity. - google.protobuf.Duration max_age = 2; - - // Delete cells that would be deleted by every nested rule. - Intersection intersection = 3; - - // Delete cells that would be deleted by any nested rule. - Union union = 4; - } -} diff --git a/google/cloud/bigtable_admin_v2/proto/bigtable_table_service.proto b/google/cloud/bigtable_admin_v2/proto/bigtable_table_service.proto deleted file mode 100644 index 6e968fee1..000000000 --- a/google/cloud/bigtable_admin_v2/proto/bigtable_table_service.proto +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2017 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -syntax = "proto3"; - -package google.bigtable.admin.table.v1; - -import "google/api/annotations.proto"; -import "google/bigtable/admin/table/v1/bigtable_table_data.proto"; -import "google/bigtable/admin/table/v1/bigtable_table_service_messages.proto"; -import "google/protobuf/empty.proto"; - -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/table/v1;table"; -option java_multiple_files = true; -option java_outer_classname = "BigtableTableServicesProto"; -option java_package = "com.google.bigtable.admin.table.v1"; - - -// Service for creating, configuring, and deleting Cloud Bigtable tables. -// Provides access to the table schemas only, not the data stored within the tables. -service BigtableTableService { - // Creates a new table, to be served from a specified cluster. - // The table can be created with a full set of initial column families, - // specified in the request. - rpc CreateTable(CreateTableRequest) returns (Table) { - option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*}/tables" body: "*" }; - } - - // Lists the names of all tables served from a specified cluster. - rpc ListTables(ListTablesRequest) returns (ListTablesResponse) { - option (google.api.http) = { get: "/v1/{name=projects/*/zones/*/clusters/*}/tables" }; - } - - // Gets the schema of the specified table, including its column families. - rpc GetTable(GetTableRequest) returns (Table) { - option (google.api.http) = { get: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}" }; - } - - // Permanently deletes a specified table and all of its data. - rpc DeleteTable(DeleteTableRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { delete: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}" }; - } - - // Changes the name of a specified table. - // Cannot be used to move tables between clusters, zones, or projects. - rpc RenameTable(RenameTableRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}:rename" body: "*" }; - } - - // Creates a new column family within a specified table. - rpc CreateColumnFamily(CreateColumnFamilyRequest) returns (ColumnFamily) { - option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}/columnFamilies" body: "*" }; - } - - // Changes the configuration of a specified column family. - rpc UpdateColumnFamily(ColumnFamily) returns (ColumnFamily) { - option (google.api.http) = { put: "/v1/{name=projects/*/zones/*/clusters/*/tables/*/columnFamilies/*}" body: "*" }; - } - - // Permanently deletes a specified column family and all of its data. - rpc DeleteColumnFamily(DeleteColumnFamilyRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { delete: "/v1/{name=projects/*/zones/*/clusters/*/tables/*/columnFamilies/*}" }; - } - - // Delete all rows in a table corresponding to a particular prefix - rpc BulkDeleteRows(BulkDeleteRowsRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}:bulkDeleteRows" body: "*" }; - } -} diff --git a/google/cloud/bigtable_admin_v2/proto/bigtable_table_service_messages.proto b/google/cloud/bigtable_admin_v2/proto/bigtable_table_service_messages.proto deleted file mode 100644 index 617ede655..000000000 --- a/google/cloud/bigtable_admin_v2/proto/bigtable_table_service_messages.proto +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2017 Google Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.table.v1; - -import "google/bigtable/admin/table/v1/bigtable_table_data.proto"; - -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/table/v1;table"; -option java_multiple_files = true; -option java_outer_classname = "BigtableTableServiceMessagesProto"; -option java_package = "com.google.bigtable.admin.table.v1"; - - -message CreateTableRequest { - // The unique name of the cluster in which to create the new table. - string name = 1; - - // The name by which the new table should be referred to within the cluster, - // e.g. "foobar" rather than "<cluster_name>/tables/foobar". - string table_id = 2; - - // The Table to create. The `name` field of the Table and all of its - // ColumnFamilies must be left blank, and will be populated in the response. - Table table = 3; - - // The optional list of row keys that will be used to initially split the - // table into several tablets (Tablets are similar to HBase regions). - // Given two split keys, "s1" and "s2", three tablets will be created, - // spanning the key ranges: [, s1), [s1, s2), [s2, ). - // - // Example: - // * Row keys := ["a", "apple", "custom", "customer_1", "customer_2", - // "other", "zz"] - // * initial_split_keys := ["apple", "customer_1", "customer_2", "other"] - // * Key assignment: - // - Tablet 1 [, apple) => {"a"}. - // - Tablet 2 [apple, customer_1) => {"apple", "custom"}. - // - Tablet 3 [customer_1, customer_2) => {"customer_1"}. - // - Tablet 4 [customer_2, other) => {"customer_2"}. - // - Tablet 5 [other, ) => {"other", "zz"}. - repeated string initial_split_keys = 4; -} - -message ListTablesRequest { - // The unique name of the cluster for which tables should be listed. - string name = 1; -} - -message ListTablesResponse { - // The tables present in the requested cluster. - // At present, only the names of the tables are populated. - repeated Table tables = 1; -} - -message GetTableRequest { - // The unique name of the requested table. - string name = 1; -} - -message DeleteTableRequest { - // The unique name of the table to be deleted. - string name = 1; -} - -message RenameTableRequest { - // The current unique name of the table. - string name = 1; - - // The new name by which the table should be referred to within its containing - // cluster, e.g. "foobar" rather than "<cluster_name>/tables/foobar". - string new_id = 2; -} - -message CreateColumnFamilyRequest { - // The unique name of the table in which to create the new column family. - string name = 1; - - // The name by which the new column family should be referred to within the - // table, e.g. "foobar" rather than "<table_name>/columnFamilies/foobar". - string column_family_id = 2; - - // The column family to create. The `name` field must be left blank. - ColumnFamily column_family = 3; -} - -message DeleteColumnFamilyRequest { - // The unique name of the column family to be deleted.
- string name = 1; -} - -message BulkDeleteRowsRequest { - // The unique name of the table on which to perform the bulk delete - string table_name = 1; - - oneof target { - // Delete all rows that start with this row key prefix. Prefix cannot be - // zero length. - bytes row_key_prefix = 2; - - // Delete all rows in the table. Setting this to false is a no-op. - bool delete_all_data_from_table = 3; - } -} diff --git a/google/cloud/bigtable_admin_v2/proto/common_pb2.py b/google/cloud/bigtable_admin_v2/proto/common_pb2.py deleted file mode 100644 index e07dea1d1..000000000 --- a/google/cloud/bigtable_admin_v2/proto/common_pb2.py +++ /dev/null @@ -1,190 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/bigtable_admin_v2/proto/common.proto -"""Generated protocol buffer code.""" -from google.protobuf.internal import enum_type_wrapper -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/bigtable_admin_v2/proto/common.proto", - package="google.bigtable.admin.v2", - syntax="proto3", - serialized_options=b'\n\034com.google.bigtable.admin.v2B\013CommonProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2\352\002"Google::Cloud::Bigtable::Admin::V2', - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n1google/cloud/bigtable_admin_v2/proto/common.proto\x12\x18google.bigtable.admin.v2\x1a\x1fgoogle/protobuf/timestamp.proto"\x8b\x01\n\x11OperationProgress\x12\x18\n\x10progress_percent\x18\x01 \x01(\x05\x12.\n\nstart_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp*=\n\x0bStorageType\x12\x1c\n\x18STORAGE_TYPE_UNSPECIFIED\x10\x00\x12\x07\n\x03SSD\x10\x01\x12\x07\n\x03HDD\x10\x02\x42\xd3\x01\n\x1c\x63om.google.bigtable.admin.v2B\x0b\x43ommonProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2\xea\x02"Google::Cloud::Bigtable::Admin::V2b\x06proto3', - dependencies=[ - google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - ], -) - -_STORAGETYPE = _descriptor.EnumDescriptor( - name="StorageType", - full_name="google.bigtable.admin.v2.StorageType", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="STORAGE_TYPE_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="SSD", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="HDD", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=254, - serialized_end=315, -) -_sym_db.RegisterEnumDescriptor(_STORAGETYPE) - -StorageType = 
enum_type_wrapper.EnumTypeWrapper(_STORAGETYPE) -STORAGE_TYPE_UNSPECIFIED = 0 -SSD = 1 -HDD = 2 - - -_OPERATIONPROGRESS = _descriptor.Descriptor( - name="OperationProgress", - full_name="google.bigtable.admin.v2.OperationProgress", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="progress_percent", - full_name="google.bigtable.admin.v2.OperationProgress.progress_percent", - index=0, - number=1, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="start_time", - full_name="google.bigtable.admin.v2.OperationProgress.start_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="end_time", - full_name="google.bigtable.admin.v2.OperationProgress.end_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=113, - serialized_end=252, -) - -_OPERATIONPROGRESS.fields_by_name[ - "start_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_OPERATIONPROGRESS.fields_by_name[ - "end_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -DESCRIPTOR.message_types_by_name["OperationProgress"] = _OPERATIONPROGRESS -DESCRIPTOR.enum_types_by_name["StorageType"] = _STORAGETYPE -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -OperationProgress = _reflection.GeneratedProtocolMessageType( - "OperationProgress", - (_message.Message,), - { - "DESCRIPTOR": _OPERATIONPROGRESS, - "__module__": "google.cloud.bigtable_admin_v2.proto.common_pb2", - "__doc__": """Encapsulates progress related information for a Cloud Bigtable long - running operation. - - Attributes: - progress_percent: - Percent completion of the operation. Values are between 0 and - 100 inclusive. - start_time: - Time the request was received. - end_time: - If set, the time at which this operation failed or was - completed successfully. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.OperationProgress) - }, -) -_sym_db.RegisterMessage(OperationProgress) - - -DESCRIPTOR._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/bigtable_admin_v2/proto/common_pb2_grpc.py b/google/cloud/bigtable_admin_v2/proto/common_pb2_grpc.py deleted file mode 100644 index 8a9393943..000000000 --- a/google/cloud/bigtable_admin_v2/proto/common_pb2_grpc.py +++ /dev/null @@ -1,3 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
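After this change the equivalent enum and message are exposed as proto-plus types rather than generated descriptor modules. A sketch, assuming they are importable from google.cloud.bigtable_admin_v2.types in the new layout:

    from google.cloud.bigtable_admin_v2 import types

    storage = types.StorageType.SSD  # replaces common_pb2.SSD == 1
    progress = types.OperationProgress(progress_percent=42)
    assert progress.progress_percent == 42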
-"""Client and server classes corresponding to protobuf-defined services.""" -import grpc diff --git a/google/cloud/bigtable_admin_v2/proto/instance_pb2.py b/google/cloud/bigtable_admin_v2/proto/instance_pb2.py deleted file mode 100644 index 4f3ce0a5b..000000000 --- a/google/cloud/bigtable_admin_v2/proto/instance_pb2.py +++ /dev/null @@ -1,893 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/bigtable_admin_v2/proto/instance.proto -"""Generated protocol buffer code.""" -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.cloud.bigtable_admin_v2.proto import ( - common_pb2 as google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_common__pb2, -) - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/bigtable_admin_v2/proto/instance.proto", - package="google.bigtable.admin.v2", - syntax="proto3", - serialized_options=b'\n\034com.google.bigtable.admin.v2B\rInstanceProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2\352\002"Google::Cloud::Bigtable::Admin::V2', - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n3google/cloud/bigtable_admin_v2/proto/instance.proto\x12\x18google.bigtable.admin.v2\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x31google/cloud/bigtable_admin_v2/proto/common.proto"\xdd\x03\n\x08Instance\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12\x19\n\x0c\x64isplay_name\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x37\n\x05state\x18\x03 \x01(\x0e\x32(.google.bigtable.admin.v2.Instance.State\x12\x35\n\x04type\x18\x04 \x01(\x0e\x32\'.google.bigtable.admin.v2.Instance.Type\x12>\n\x06labels\x18\x05 \x03(\x0b\x32..google.bigtable.admin.v2.Instance.LabelsEntry\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"5\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02"=\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x0e\n\nPRODUCTION\x10\x01\x12\x0f\n\x0b\x44\x45VELOPMENT\x10\x02:N\xea\x41K\n bigtable.googleapis.com/Instance\x12\'projects/{project}/instances/{instance}"\xa7\x03\n\x07\x43luster\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12\x38\n\x08location\x18\x02 \x01(\tB&\xfa\x41#\n!locations.googleapis.com/Location\x12;\n\x05state\x18\x03 \x01(\x0e\x32\'.google.bigtable.admin.v2.Cluster.StateB\x03\xe0\x41\x03\x12\x18\n\x0bserve_nodes\x18\x04 \x01(\x05\x42\x03\xe0\x41\x02\x12\x43\n\x14\x64\x65\x66\x61ult_storage_type\x18\x05 \x01(\x0e\x32%.google.bigtable.admin.v2.StorageType"Q\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02\x12\x0c\n\x08RESIZING\x10\x03\x12\x0c\n\x08\x44ISABLED\x10\x04:`\xea\x41]\n\x1f\x62igtable.googleapis.com/Cluster\x12:projects/{project}/instances/{instance}/clusters/{cluster}"\xee\x03\n\nAppProfile\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04\x65tag\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x03 
\x01(\t\x12g\n\x1dmulti_cluster_routing_use_any\x18\x05 \x01(\x0b\x32>.google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAnyH\x00\x12[\n\x16single_cluster_routing\x18\x06 \x01(\x0b\x32\x39.google.bigtable.admin.v2.AppProfile.SingleClusterRoutingH\x00\x1a\x1b\n\x19MultiClusterRoutingUseAny\x1aN\n\x14SingleClusterRouting\x12\x12\n\ncluster_id\x18\x01 \x01(\t\x12"\n\x1a\x61llow_transactional_writes\x18\x02 \x01(\x08:j\xea\x41g\n"bigtable.googleapis.com/AppProfile\x12\x41projects/{project}/instances/{instance}/appProfiles/{app_profile}B\x10\n\x0erouting_policyB\xd5\x01\n\x1c\x63om.google.bigtable.admin.v2B\rInstanceProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2\xea\x02"Google::Cloud::Bigtable::Admin::V2b\x06proto3', - dependencies=[ - google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, - google_dot_api_dot_resource__pb2.DESCRIPTOR, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_common__pb2.DESCRIPTOR, - ], -) - - -_INSTANCE_STATE = _descriptor.EnumDescriptor( - name="State", - full_name="google.bigtable.admin.v2.Instance.State", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="STATE_NOT_KNOWN", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="READY", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="CREATING", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=474, - serialized_end=527, -) -_sym_db.RegisterEnumDescriptor(_INSTANCE_STATE) - -_INSTANCE_TYPE = _descriptor.EnumDescriptor( - name="Type", - full_name="google.bigtable.admin.v2.Instance.Type", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="TYPE_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="PRODUCTION", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="DEVELOPMENT", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=529, - serialized_end=590, -) -_sym_db.RegisterEnumDescriptor(_INSTANCE_TYPE) - -_CLUSTER_STATE = _descriptor.EnumDescriptor( - name="State", - full_name="google.bigtable.admin.v2.Cluster.State", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="STATE_NOT_KNOWN", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="READY", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="CREATING", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - 
_descriptor.EnumValueDescriptor( - name="RESIZING", - index=3, - number=3, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="DISABLED", - index=4, - number=4, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=917, - serialized_end=998, -) -_sym_db.RegisterEnumDescriptor(_CLUSTER_STATE) - - -_INSTANCE_LABELSENTRY = _descriptor.Descriptor( - name="LabelsEntry", - full_name="google.bigtable.admin.v2.Instance.LabelsEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.bigtable.admin.v2.Instance.LabelsEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.bigtable.admin.v2.Instance.LabelsEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=427, - serialized_end=472, -) - -_INSTANCE = _descriptor.Descriptor( - name="Instance", - full_name="google.bigtable.admin.v2.Instance", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.Instance.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="display_name", - full_name="google.bigtable.admin.v2.Instance.display_name", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="state", - full_name="google.bigtable.admin.v2.Instance.state", - index=2, - number=3, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="type", - full_name="google.bigtable.admin.v2.Instance.type", - index=3, - number=4, - type=14, - cpp_type=8, - label=1, - 
has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="labels", - full_name="google.bigtable.admin.v2.Instance.labels", - index=4, - number=5, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[ - _INSTANCE_LABELSENTRY, - ], - enum_types=[ - _INSTANCE_STATE, - _INSTANCE_TYPE, - ], - serialized_options=b"\352AK\n bigtable.googleapis.com/Instance\022'projects/{project}/instances/{instance}", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=193, - serialized_end=670, -) - - -_CLUSTER = _descriptor.Descriptor( - name="Cluster", - full_name="google.bigtable.admin.v2.Cluster", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.Cluster.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="location", - full_name="google.bigtable.admin.v2.Cluster.location", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\372A#\n!locations.googleapis.com/Location", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="state", - full_name="google.bigtable.admin.v2.Cluster.state", - index=2, - number=3, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="serve_nodes", - full_name="google.bigtable.admin.v2.Cluster.serve_nodes", - index=3, - number=4, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="default_storage_type", - full_name="google.bigtable.admin.v2.Cluster.default_storage_type", - index=4, - number=5, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[ - _CLUSTER_STATE, - ], - 
serialized_options=b"\352A]\n\037bigtable.googleapis.com/Cluster\022:projects/{project}/instances/{instance}/clusters/{cluster}", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=673, - serialized_end=1096, -) - - -_APPPROFILE_MULTICLUSTERROUTINGUSEANY = _descriptor.Descriptor( - name="MultiClusterRoutingUseAny", - full_name="google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1360, - serialized_end=1387, -) - -_APPPROFILE_SINGLECLUSTERROUTING = _descriptor.Descriptor( - name="SingleClusterRouting", - full_name="google.bigtable.admin.v2.AppProfile.SingleClusterRouting", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="cluster_id", - full_name="google.bigtable.admin.v2.AppProfile.SingleClusterRouting.cluster_id", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="allow_transactional_writes", - full_name="google.bigtable.admin.v2.AppProfile.SingleClusterRouting.allow_transactional_writes", - index=1, - number=2, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1389, - serialized_end=1467, -) - -_APPPROFILE = _descriptor.Descriptor( - name="AppProfile", - full_name="google.bigtable.admin.v2.AppProfile", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.AppProfile.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="etag", - full_name="google.bigtable.admin.v2.AppProfile.etag", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="description", - full_name="google.bigtable.admin.v2.AppProfile.description", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - 
message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="multi_cluster_routing_use_any", - full_name="google.bigtable.admin.v2.AppProfile.multi_cluster_routing_use_any", - index=3, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="single_cluster_routing", - full_name="google.bigtable.admin.v2.AppProfile.single_cluster_routing", - index=4, - number=6, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[ - _APPPROFILE_MULTICLUSTERROUTINGUSEANY, - _APPPROFILE_SINGLECLUSTERROUTING, - ], - enum_types=[], - serialized_options=b'\352Ag\n"bigtable.googleapis.com/AppProfile\022Aprojects/{project}/instances/{instance}/appProfiles/{app_profile}', - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="routing_policy", - full_name="google.bigtable.admin.v2.AppProfile.routing_policy", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=1099, - serialized_end=1593, -) - -_INSTANCE_LABELSENTRY.containing_type = _INSTANCE -_INSTANCE.fields_by_name["state"].enum_type = _INSTANCE_STATE -_INSTANCE.fields_by_name["type"].enum_type = _INSTANCE_TYPE -_INSTANCE.fields_by_name["labels"].message_type = _INSTANCE_LABELSENTRY -_INSTANCE_STATE.containing_type = _INSTANCE -_INSTANCE_TYPE.containing_type = _INSTANCE -_CLUSTER.fields_by_name["state"].enum_type = _CLUSTER_STATE -_CLUSTER.fields_by_name[ - "default_storage_type" -].enum_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_common__pb2._STORAGETYPE -) -_CLUSTER_STATE.containing_type = _CLUSTER -_APPPROFILE_MULTICLUSTERROUTINGUSEANY.containing_type = _APPPROFILE -_APPPROFILE_SINGLECLUSTERROUTING.containing_type = _APPPROFILE -_APPPROFILE.fields_by_name[ - "multi_cluster_routing_use_any" -].message_type = _APPPROFILE_MULTICLUSTERROUTINGUSEANY -_APPPROFILE.fields_by_name[ - "single_cluster_routing" -].message_type = _APPPROFILE_SINGLECLUSTERROUTING -_APPPROFILE.oneofs_by_name["routing_policy"].fields.append( - _APPPROFILE.fields_by_name["multi_cluster_routing_use_any"] -) -_APPPROFILE.fields_by_name[ - "multi_cluster_routing_use_any" -].containing_oneof = _APPPROFILE.oneofs_by_name["routing_policy"] -_APPPROFILE.oneofs_by_name["routing_policy"].fields.append( - _APPPROFILE.fields_by_name["single_cluster_routing"] -) -_APPPROFILE.fields_by_name[ - "single_cluster_routing" -].containing_oneof = _APPPROFILE.oneofs_by_name["routing_policy"] -DESCRIPTOR.message_types_by_name["Instance"] = _INSTANCE -DESCRIPTOR.message_types_by_name["Cluster"] = _CLUSTER -DESCRIPTOR.message_types_by_name["AppProfile"] = _APPPROFILE -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -Instance = _reflection.GeneratedProtocolMessageType( - "Instance", - (_message.Message,), - { - "LabelsEntry": 
_reflection.GeneratedProtocolMessageType( - "LabelsEntry", - (_message.Message,), - { - "DESCRIPTOR": _INSTANCE_LABELSENTRY, - "__module__": "google.cloud.bigtable_admin_v2.proto.instance_pb2" - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Instance.LabelsEntry) - }, - ), - "DESCRIPTOR": _INSTANCE, - "__module__": "google.cloud.bigtable_admin_v2.proto.instance_pb2", - "__doc__": """A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and - the resources that serve them. All tables in an instance are served - from all [Clusters][google.bigtable.admin.v2.Cluster] in the instance. - - Attributes: - name: - The unique name of the instance. Values are of the form - ``projects/{project}/instances/[a-z][a-z0-9\\-]+[a-z0-9]``. - display_name: - Required. The descriptive name for this instance as it appears - in UIs. Can be changed at any time, but should be kept - globally unique to avoid confusion. - state: - (\ ``OutputOnly``) The current state of the instance. - type: - The type of the instance. Defaults to ``PRODUCTION``. - labels: - Labels are a flexible and lightweight mechanism for organizing - cloud resources into groups that reflect a customer’s - organizational needs and deployment strategies. They can be - used to filter resources and aggregate metrics. - Label keys - must be between 1 and 63 characters long and must conform - to the regular expression: - ``[\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}``. - Label values - must be between 0 and 63 characters long and must conform - to the regular expression: ``[\p{Ll}\p{Lo}\p{N}_-]{0,63}``. - - No more than 64 labels can be associated with a given - resource. - Keys and values must both be under 128 bytes. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Instance) - }, -) -_sym_db.RegisterMessage(Instance) -_sym_db.RegisterMessage(Instance.LabelsEntry) - -Cluster = _reflection.GeneratedProtocolMessageType( - "Cluster", - (_message.Message,), - { - "DESCRIPTOR": _CLUSTER, - "__module__": "google.cloud.bigtable_admin_v2.proto.instance_pb2", - "__doc__": """A resizable group of nodes in a particular cloud location, capable of - serving all [Tables][google.bigtable.admin.v2.Table] in the parent - [Instance][google.bigtable.admin.v2.Instance]. - - Attributes: - name: - The unique name of the cluster. Values are of the form ``proje - cts/{project}/instances/{instance}/clusters/[a-z][-a-z0-9]*``. - location: - (\ ``CreationOnly``) The location where this cluster’s nodes - and storage reside. For best performance, clients should be - located as close as possible to this cluster. Currently only - zones are supported, so values should be of the form - ``projects/{project}/locations/{zone}``. - state: - The current state of the cluster. - serve_nodes: - Required. The number of nodes allocated to this cluster. More - nodes enable higher throughput and more consistent - performance. - default_storage_type: - (\ ``CreationOnly``) The type of storage used by this cluster - to serve its parent instance’s tables, unless explicitly - overridden. 
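The Instance and Cluster attributes documented in these docstrings are what the handwritten layer populates when creating an instance. A sketch with placeholder IDs and zone:

    from google.cloud.bigtable import Client, enums

    client = Client(admin=True)
    instance = client.instance(
        "my-instance",
        display_name="My instance",
        instance_type=enums.Instance.Type.PRODUCTION,
        labels={"env": "prod"},  # keys/values must satisfy the regexes above
    )
    cluster = instance.cluster(
        "my-cluster",
        location_id="us-central1-b",  # placeholder zone
        serve_nodes=3,
        default_storage_type=enums.StorageType.SSD,
    )
    operation = instance.create(clusters=[cluster])  # long-running operation
    operation.result(timeout=120)  # block until the instance is READY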
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Cluster) - }, -) -_sym_db.RegisterMessage(Cluster) - -AppProfile = _reflection.GeneratedProtocolMessageType( - "AppProfile", - (_message.Message,), - { - "MultiClusterRoutingUseAny": _reflection.GeneratedProtocolMessageType( - "MultiClusterRoutingUseAny", - (_message.Message,), - { - "DESCRIPTOR": _APPPROFILE_MULTICLUSTERROUTINGUSEANY, - "__module__": "google.cloud.bigtable_admin_v2.proto.instance_pb2", - "__doc__": """Read/write requests are routed to the nearest cluster in the instance, - and will fail over to the nearest cluster that is available in the - event of transient errors or delays. Clusters in a region are - considered equidistant. Choosing this option sacrifices read-your- - writes consistency to improve availability.""", - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny) - }, - ), - "SingleClusterRouting": _reflection.GeneratedProtocolMessageType( - "SingleClusterRouting", - (_message.Message,), - { - "DESCRIPTOR": _APPPROFILE_SINGLECLUSTERROUTING, - "__module__": "google.cloud.bigtable_admin_v2.proto.instance_pb2", - "__doc__": """Unconditionally routes all read/write requests to a specific cluster. - This option preserves read-your-writes consistency but does not - improve availability. - - Attributes: - cluster_id: - The cluster to which read/write requests should be routed. - allow_transactional_writes: - Whether or not ``CheckAndMutateRow`` and - ``ReadModifyWriteRow`` requests are allowed by this app - profile. It is unsafe to send these requests to the same - table/row/column in multiple clusters. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.AppProfile.SingleClusterRouting) - }, - ), - "DESCRIPTOR": _APPPROFILE, - "__module__": "google.cloud.bigtable_admin_v2.proto.instance_pb2", - "__doc__": """A configuration object describing how Cloud Bigtable should treat - traffic from a particular end user application. - - Attributes: - name: - (\ ``OutputOnly``) The unique name of the app profile. Values - are of the form - ``projects//instances//appProfiles/[_a- - zA-Z0-9][-_.a-zA-Z0-9]*``. - etag: - Strongly validated etag for optimistic concurrency control. - Preserve the value returned from ``GetAppProfile`` when - calling ``UpdateAppProfile`` to fail the request if there has - been a modification in the mean time. The ``update_mask`` of - the request need not include ``etag`` for this protection to - apply. See `Wikipedia - `__ and `RFC 7232 - `__ for more - details. - description: - Optional long form description of the use case for this - AppProfile. - routing_policy: - The routing policy for all read/write requests that use this - app profile. A value must be explicitly set. - multi_cluster_routing_use_any: - Use a multi-cluster routing policy. - single_cluster_routing: - Use a single-cluster routing policy. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.AppProfile) - }, -) -_sym_db.RegisterMessage(AppProfile) -_sym_db.RegisterMessage(AppProfile.MultiClusterRoutingUseAny) -_sym_db.RegisterMessage(AppProfile.SingleClusterRouting) - - -DESCRIPTOR._options = None -_INSTANCE_LABELSENTRY._options = None -_INSTANCE.fields_by_name["name"]._options = None -_INSTANCE.fields_by_name["display_name"]._options = None -_INSTANCE._options = None -_CLUSTER.fields_by_name["name"]._options = None -_CLUSTER.fields_by_name["location"]._options = None -_CLUSTER.fields_by_name["state"]._options = None -_CLUSTER.fields_by_name["serve_nodes"]._options = None -_CLUSTER._options = None -_APPPROFILE._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/bigtable_admin_v2/proto/instance_pb2_grpc.py b/google/cloud/bigtable_admin_v2/proto/instance_pb2_grpc.py deleted file mode 100644 index 8a9393943..000000000 --- a/google/cloud/bigtable_admin_v2/proto/instance_pb2_grpc.py +++ /dev/null @@ -1,3 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc diff --git a/google/cloud/bigtable_admin_v2/proto/table_pb2.py b/google/cloud/bigtable_admin_v2/proto/table_pb2.py deleted file mode 100644 index 71191acba..000000000 --- a/google/cloud/bigtable_admin_v2/proto/table_pb2.py +++ /dev/null @@ -1,1694 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/bigtable_admin_v2/proto/table.proto -"""Generated protocol buffer code.""" -from google.protobuf.internal import enum_type_wrapper -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/bigtable_admin_v2/proto/table.proto", - package="google.bigtable.admin.v2", - syntax="proto3", - serialized_options=b'\n\034com.google.bigtable.admin.v2B\nTableProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2\352\002"Google::Cloud::Bigtable::Admin::V2', - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n0google/cloud/bigtable_admin_v2/proto/table.proto\x12\x18google.bigtable.admin.v2\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\x9b\x01\n\x0bRestoreInfo\x12@\n\x0bsource_type\x18\x01 \x01(\x0e\x32+.google.bigtable.admin.v2.RestoreSourceType\x12;\n\x0b\x62\x61\x63kup_info\x18\x02 \x01(\x0b\x32$.google.bigtable.admin.v2.BackupInfoH\x00\x42\r\n\x0bsource_info"\xfb\x07\n\x05Table\x12\x0c\n\x04name\x18\x01 \x01(\t\x12J\n\x0e\x63luster_states\x18\x02 \x03(\x0b\x32\x32.google.bigtable.admin.v2.Table.ClusterStatesEntry\x12L\n\x0f\x63olumn_families\x18\x03 
\x03(\x0b\x32\x33.google.bigtable.admin.v2.Table.ColumnFamiliesEntry\x12I\n\x0bgranularity\x18\x04 \x01(\x0e\x32\x34.google.bigtable.admin.v2.Table.TimestampGranularity\x12;\n\x0crestore_info\x18\x06 \x01(\x0b\x32%.google.bigtable.admin.v2.RestoreInfo\x1a\xf9\x01\n\x0c\x43lusterState\x12X\n\x11replication_state\x18\x01 \x01(\x0e\x32=.google.bigtable.admin.v2.Table.ClusterState.ReplicationState"\x8e\x01\n\x10ReplicationState\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\x10\n\x0cINITIALIZING\x10\x01\x12\x17\n\x13PLANNED_MAINTENANCE\x10\x02\x12\x19\n\x15UNPLANNED_MAINTENANCE\x10\x03\x12\t\n\x05READY\x10\x04\x12\x14\n\x10READY_OPTIMIZING\x10\x05\x1a\x62\n\x12\x43lusterStatesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12;\n\x05value\x18\x02 \x01(\x0b\x32,.google.bigtable.admin.v2.Table.ClusterState:\x02\x38\x01\x1a]\n\x13\x43olumnFamiliesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x35\n\x05value\x18\x02 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamily:\x02\x38\x01"I\n\x14TimestampGranularity\x12%\n!TIMESTAMP_GRANULARITY_UNSPECIFIED\x10\x00\x12\n\n\x06MILLIS\x10\x01"\\\n\x04View\x12\x14\n\x10VIEW_UNSPECIFIED\x10\x00\x12\r\n\tNAME_ONLY\x10\x01\x12\x0f\n\x0bSCHEMA_VIEW\x10\x02\x12\x14\n\x10REPLICATION_VIEW\x10\x03\x12\x08\n\x04\x46ULL\x10\x04:Z\xea\x41W\n\x1d\x62igtable.googleapis.com/Table\x12\x36projects/{project}/instances/{instance}/tables/{table}"A\n\x0c\x43olumnFamily\x12\x31\n\x07gc_rule\x18\x01 \x01(\x0b\x32 .google.bigtable.admin.v2.GcRule"\xd5\x02\n\x06GcRule\x12\x1a\n\x10max_num_versions\x18\x01 \x01(\x05H\x00\x12,\n\x07max_age\x18\x02 \x01(\x0b\x32\x19.google.protobuf.DurationH\x00\x12\x45\n\x0cintersection\x18\x03 \x01(\x0b\x32-.google.bigtable.admin.v2.GcRule.IntersectionH\x00\x12\x37\n\x05union\x18\x04 \x01(\x0b\x32&.google.bigtable.admin.v2.GcRule.UnionH\x00\x1a?\n\x0cIntersection\x12/\n\x05rules\x18\x01 \x03(\x0b\x32 .google.bigtable.admin.v2.GcRule\x1a\x38\n\x05Union\x12/\n\x05rules\x18\x01 \x03(\x0b\x32 .google.bigtable.admin.v2.GcRuleB\x06\n\x04rule"\xc7\x03\n\x08Snapshot\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x35\n\x0csource_table\x18\x02 \x01(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12\x17\n\x0f\x64\x61ta_size_bytes\x18\x03 \x01(\x03\x12/\n\x0b\x63reate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x64\x65lete_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x37\n\x05state\x18\x06 \x01(\x0e\x32(.google.bigtable.admin.v2.Snapshot.State\x12\x13\n\x0b\x64\x65scription\x18\x07 \x01(\t"5\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02:v\xea\x41s\n bigtable.googleapis.com/Snapshot\x12Oprojects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}"\xd7\x03\n\x06\x42\x61\x63kup\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12\x1c\n\x0csource_table\x18\x02 \x01(\tB\x06\xe0\x41\x05\xe0\x41\x02\x12\x34\n\x0b\x65xpire_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x02\x12\x33\n\nstart_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x31\n\x08\x65nd_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x17\n\nsize_bytes\x18\x06 \x01(\x03\x42\x03\xe0\x41\x03\x12:\n\x05state\x18\x07 
\x01(\x0e\x32&.google.bigtable.admin.v2.Backup.StateB\x03\xe0\x41\x03"7\n\x05State\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\x0c\n\x08\x43REATING\x10\x01\x12\t\n\x05READY\x10\x02:p\xea\x41m\n\x1e\x62igtable.googleapis.com/Backup\x12Kprojects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}"\xa4\x01\n\nBackupInfo\x12\x13\n\x06\x62\x61\x63kup\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12\x33\n\nstart_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x31\n\x08\x65nd_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x19\n\x0csource_table\x18\x04 \x01(\tB\x03\xe0\x41\x03*D\n\x11RestoreSourceType\x12#\n\x1fRESTORE_SOURCE_TYPE_UNSPECIFIED\x10\x00\x12\n\n\x06\x42\x41\x43KUP\x10\x01\x42\xd2\x01\n\x1c\x63om.google.bigtable.admin.v2B\nTableProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2\xea\x02"Google::Cloud::Bigtable::Admin::V2b\x06proto3', - dependencies=[ - google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, - google_dot_api_dot_resource__pb2.DESCRIPTOR, - google_dot_protobuf_dot_duration__pb2.DESCRIPTOR, - google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - ], -) - -_RESTORESOURCETYPE = _descriptor.EnumDescriptor( - name="RestoreSourceType", - full_name="google.bigtable.admin.v2.RestoreSourceType", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="RESTORE_SOURCE_TYPE_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="BACKUP", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=2893, - serialized_end=2961, -) -_sym_db.RegisterEnumDescriptor(_RESTORESOURCETYPE) - -RestoreSourceType = enum_type_wrapper.EnumTypeWrapper(_RESTORESOURCETYPE) -RESTORE_SOURCE_TYPE_UNSPECIFIED = 0 -BACKUP = 1 - - -_TABLE_CLUSTERSTATE_REPLICATIONSTATE = _descriptor.EnumDescriptor( - name="ReplicationState", - full_name="google.bigtable.admin.v2.Table.ClusterState.ReplicationState", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="STATE_NOT_KNOWN", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="INITIALIZING", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="PLANNED_MAINTENANCE", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="UNPLANNED_MAINTENANCE", - index=3, - number=3, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="READY", - index=4, - number=4, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="READY_OPTIMIZING", - index=5, - number=5, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=783, - serialized_end=925, 
-) -_sym_db.RegisterEnumDescriptor(_TABLE_CLUSTERSTATE_REPLICATIONSTATE) - -_TABLE_TIMESTAMPGRANULARITY = _descriptor.EnumDescriptor( - name="TimestampGranularity", - full_name="google.bigtable.admin.v2.Table.TimestampGranularity", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="TIMESTAMP_GRANULARITY_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="MILLIS", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=1122, - serialized_end=1195, -) -_sym_db.RegisterEnumDescriptor(_TABLE_TIMESTAMPGRANULARITY) - -_TABLE_VIEW = _descriptor.EnumDescriptor( - name="View", - full_name="google.bigtable.admin.v2.Table.View", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="VIEW_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="NAME_ONLY", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="SCHEMA_VIEW", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="REPLICATION_VIEW", - index=3, - number=3, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="FULL", - index=4, - number=4, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=1197, - serialized_end=1289, -) -_sym_db.RegisterEnumDescriptor(_TABLE_VIEW) - -_SNAPSHOT_STATE = _descriptor.EnumDescriptor( - name="State", - full_name="google.bigtable.admin.v2.Snapshot.State", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="STATE_NOT_KNOWN", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="READY", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="CREATING", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=2077, - serialized_end=2130, -) -_sym_db.RegisterEnumDescriptor(_SNAPSHOT_STATE) - -_BACKUP_STATE = _descriptor.EnumDescriptor( - name="State", - full_name="google.bigtable.admin.v2.Backup.State", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="STATE_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="CREATING", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - 
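The ReplicationState values above surface through Table.get_cluster_states() in the handwritten layer. A sketch with placeholder IDs:

    from google.cloud import bigtable

    table = bigtable.Client(admin=True).instance("my-instance").table("my-table")
    for cluster_id, cluster_state in table.get_cluster_states().items():
        # replication_state holds one of the six enum values listed above
        print(cluster_id, cluster_state.replication_state)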
name="READY", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=2555, - serialized_end=2610, -) -_sym_db.RegisterEnumDescriptor(_BACKUP_STATE) - - -_RESTOREINFO = _descriptor.Descriptor( - name="RestoreInfo", - full_name="google.bigtable.admin.v2.RestoreInfo", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="source_type", - full_name="google.bigtable.admin.v2.RestoreInfo.source_type", - index=0, - number=1, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="backup_info", - full_name="google.bigtable.admin.v2.RestoreInfo.backup_info", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="source_info", - full_name="google.bigtable.admin.v2.RestoreInfo.source_info", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=204, - serialized_end=359, -) - - -_TABLE_CLUSTERSTATE = _descriptor.Descriptor( - name="ClusterState", - full_name="google.bigtable.admin.v2.Table.ClusterState", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="replication_state", - full_name="google.bigtable.admin.v2.Table.ClusterState.replication_state", - index=0, - number=1, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[ - _TABLE_CLUSTERSTATE_REPLICATIONSTATE, - ], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=676, - serialized_end=925, -) - -_TABLE_CLUSTERSTATESENTRY = _descriptor.Descriptor( - name="ClusterStatesEntry", - full_name="google.bigtable.admin.v2.Table.ClusterStatesEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.bigtable.admin.v2.Table.ClusterStatesEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - 
full_name="google.bigtable.admin.v2.Table.ClusterStatesEntry.value", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=927, - serialized_end=1025, -) - -_TABLE_COLUMNFAMILIESENTRY = _descriptor.Descriptor( - name="ColumnFamiliesEntry", - full_name="google.bigtable.admin.v2.Table.ColumnFamiliesEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.bigtable.admin.v2.Table.ColumnFamiliesEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.bigtable.admin.v2.Table.ColumnFamiliesEntry.value", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1027, - serialized_end=1120, -) - -_TABLE = _descriptor.Descriptor( - name="Table", - full_name="google.bigtable.admin.v2.Table", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.Table.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cluster_states", - full_name="google.bigtable.admin.v2.Table.cluster_states", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="column_families", - full_name="google.bigtable.admin.v2.Table.column_families", - index=2, - number=3, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="granularity", - 
full_name="google.bigtable.admin.v2.Table.granularity", - index=3, - number=4, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="restore_info", - full_name="google.bigtable.admin.v2.Table.restore_info", - index=4, - number=6, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[ - _TABLE_CLUSTERSTATE, - _TABLE_CLUSTERSTATESENTRY, - _TABLE_COLUMNFAMILIESENTRY, - ], - enum_types=[ - _TABLE_TIMESTAMPGRANULARITY, - _TABLE_VIEW, - ], - serialized_options=b"\352AW\n\035bigtable.googleapis.com/Table\0226projects/{project}/instances/{instance}/tables/{table}", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=362, - serialized_end=1381, -) - - -_COLUMNFAMILY = _descriptor.Descriptor( - name="ColumnFamily", - full_name="google.bigtable.admin.v2.ColumnFamily", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="gc_rule", - full_name="google.bigtable.admin.v2.ColumnFamily.gc_rule", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1383, - serialized_end=1448, -) - - -_GCRULE_INTERSECTION = _descriptor.Descriptor( - name="Intersection", - full_name="google.bigtable.admin.v2.GcRule.Intersection", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="rules", - full_name="google.bigtable.admin.v2.GcRule.Intersection.rules", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1663, - serialized_end=1726, -) - -_GCRULE_UNION = _descriptor.Descriptor( - name="Union", - full_name="google.bigtable.admin.v2.GcRule.Union", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="rules", - full_name="google.bigtable.admin.v2.GcRule.Union.rules", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - 
extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1728, - serialized_end=1784, -) - -_GCRULE = _descriptor.Descriptor( - name="GcRule", - full_name="google.bigtable.admin.v2.GcRule", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="max_num_versions", - full_name="google.bigtable.admin.v2.GcRule.max_num_versions", - index=0, - number=1, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="max_age", - full_name="google.bigtable.admin.v2.GcRule.max_age", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="intersection", - full_name="google.bigtable.admin.v2.GcRule.intersection", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="union", - full_name="google.bigtable.admin.v2.GcRule.union", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[ - _GCRULE_INTERSECTION, - _GCRULE_UNION, - ], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="rule", - full_name="google.bigtable.admin.v2.GcRule.rule", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=1451, - serialized_end=1792, -) - - -_SNAPSHOT = _descriptor.Descriptor( - name="Snapshot", - full_name="google.bigtable.admin.v2.Snapshot", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.Snapshot.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="source_table", - full_name="google.bigtable.admin.v2.Snapshot.source_table", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - 
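The GcRule oneof whose descriptors are deleted here (max_num_versions / max_age / intersection / union) is what the handwritten column-family helpers serialize to. A sketch, with a placeholder family ID:

    import datetime

    from google.cloud import bigtable
    from google.cloud.bigtable.column_family import (
        GCRuleUnion,
        MaxAgeGCRule,
        MaxVersionsGCRule,
    )

    table = bigtable.Client(admin=True).instance("my-instance").table("my-table")

    # union: a cell is eligible for garbage collection as soon as *any*
    # nested rule would collect it (more than 2 versions, or older than 7 days).
    rule = GCRuleUnion(
        [MaxVersionsGCRule(2), MaxAgeGCRule(datetime.timedelta(days=7))]
    )
    table.column_family("cf1", gc_rule=rule).create()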
has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="data_size_bytes", - full_name="google.bigtable.admin.v2.Snapshot.data_size_bytes", - index=2, - number=3, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="create_time", - full_name="google.bigtable.admin.v2.Snapshot.create_time", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="delete_time", - full_name="google.bigtable.admin.v2.Snapshot.delete_time", - index=4, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="state", - full_name="google.bigtable.admin.v2.Snapshot.state", - index=5, - number=6, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="description", - full_name="google.bigtable.admin.v2.Snapshot.description", - index=6, - number=7, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[ - _SNAPSHOT_STATE, - ], - serialized_options=b"\352As\n bigtable.googleapis.com/Snapshot\022Oprojects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1795, - serialized_end=2250, -) - - -_BACKUP = _descriptor.Descriptor( - name="Backup", - full_name="google.bigtable.admin.v2.Backup", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.Backup.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="source_table", - full_name="google.bigtable.admin.v2.Backup.source_table", - index=1, - 
number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\005\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="expire_time", - full_name="google.bigtable.admin.v2.Backup.expire_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="start_time", - full_name="google.bigtable.admin.v2.Backup.start_time", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="end_time", - full_name="google.bigtable.admin.v2.Backup.end_time", - index=4, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="size_bytes", - full_name="google.bigtable.admin.v2.Backup.size_bytes", - index=5, - number=6, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="state", - full_name="google.bigtable.admin.v2.Backup.state", - index=6, - number=7, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[ - _BACKUP_STATE, - ], - serialized_options=b"\352Am\n\036bigtable.googleapis.com/Backup\022Kprojects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2253, - serialized_end=2724, -) - - -_BACKUPINFO = _descriptor.Descriptor( - name="BackupInfo", - full_name="google.bigtable.admin.v2.BackupInfo", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="backup", - full_name="google.bigtable.admin.v2.BackupInfo.backup", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - 
name="start_time", - full_name="google.bigtable.admin.v2.BackupInfo.start_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="end_time", - full_name="google.bigtable.admin.v2.BackupInfo.end_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="source_table", - full_name="google.bigtable.admin.v2.BackupInfo.source_table", - index=3, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2727, - serialized_end=2891, -) - -_RESTOREINFO.fields_by_name["source_type"].enum_type = _RESTORESOURCETYPE -_RESTOREINFO.fields_by_name["backup_info"].message_type = _BACKUPINFO -_RESTOREINFO.oneofs_by_name["source_info"].fields.append( - _RESTOREINFO.fields_by_name["backup_info"] -) -_RESTOREINFO.fields_by_name[ - "backup_info" -].containing_oneof = _RESTOREINFO.oneofs_by_name["source_info"] -_TABLE_CLUSTERSTATE.fields_by_name[ - "replication_state" -].enum_type = _TABLE_CLUSTERSTATE_REPLICATIONSTATE -_TABLE_CLUSTERSTATE.containing_type = _TABLE -_TABLE_CLUSTERSTATE_REPLICATIONSTATE.containing_type = _TABLE_CLUSTERSTATE -_TABLE_CLUSTERSTATESENTRY.fields_by_name["value"].message_type = _TABLE_CLUSTERSTATE -_TABLE_CLUSTERSTATESENTRY.containing_type = _TABLE -_TABLE_COLUMNFAMILIESENTRY.fields_by_name["value"].message_type = _COLUMNFAMILY -_TABLE_COLUMNFAMILIESENTRY.containing_type = _TABLE -_TABLE.fields_by_name["cluster_states"].message_type = _TABLE_CLUSTERSTATESENTRY -_TABLE.fields_by_name["column_families"].message_type = _TABLE_COLUMNFAMILIESENTRY -_TABLE.fields_by_name["granularity"].enum_type = _TABLE_TIMESTAMPGRANULARITY -_TABLE.fields_by_name["restore_info"].message_type = _RESTOREINFO -_TABLE_TIMESTAMPGRANULARITY.containing_type = _TABLE -_TABLE_VIEW.containing_type = _TABLE -_COLUMNFAMILY.fields_by_name["gc_rule"].message_type = _GCRULE -_GCRULE_INTERSECTION.fields_by_name["rules"].message_type = _GCRULE -_GCRULE_INTERSECTION.containing_type = _GCRULE -_GCRULE_UNION.fields_by_name["rules"].message_type = _GCRULE -_GCRULE_UNION.containing_type = _GCRULE -_GCRULE.fields_by_name[ - "max_age" -].message_type = google_dot_protobuf_dot_duration__pb2._DURATION -_GCRULE.fields_by_name["intersection"].message_type = _GCRULE_INTERSECTION -_GCRULE.fields_by_name["union"].message_type = _GCRULE_UNION -_GCRULE.oneofs_by_name["rule"].fields.append(_GCRULE.fields_by_name["max_num_versions"]) -_GCRULE.fields_by_name["max_num_versions"].containing_oneof = _GCRULE.oneofs_by_name[ - "rule" -] 
-_GCRULE.oneofs_by_name["rule"].fields.append(_GCRULE.fields_by_name["max_age"]) -_GCRULE.fields_by_name["max_age"].containing_oneof = _GCRULE.oneofs_by_name["rule"] -_GCRULE.oneofs_by_name["rule"].fields.append(_GCRULE.fields_by_name["intersection"]) -_GCRULE.fields_by_name["intersection"].containing_oneof = _GCRULE.oneofs_by_name["rule"] -_GCRULE.oneofs_by_name["rule"].fields.append(_GCRULE.fields_by_name["union"]) -_GCRULE.fields_by_name["union"].containing_oneof = _GCRULE.oneofs_by_name["rule"] -_SNAPSHOT.fields_by_name["source_table"].message_type = _TABLE -_SNAPSHOT.fields_by_name[ - "create_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_SNAPSHOT.fields_by_name[ - "delete_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_SNAPSHOT.fields_by_name["state"].enum_type = _SNAPSHOT_STATE -_SNAPSHOT_STATE.containing_type = _SNAPSHOT -_BACKUP.fields_by_name[ - "expire_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_BACKUP.fields_by_name[ - "start_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_BACKUP.fields_by_name[ - "end_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_BACKUP.fields_by_name["state"].enum_type = _BACKUP_STATE -_BACKUP_STATE.containing_type = _BACKUP -_BACKUPINFO.fields_by_name[ - "start_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_BACKUPINFO.fields_by_name[ - "end_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -DESCRIPTOR.message_types_by_name["RestoreInfo"] = _RESTOREINFO -DESCRIPTOR.message_types_by_name["Table"] = _TABLE -DESCRIPTOR.message_types_by_name["ColumnFamily"] = _COLUMNFAMILY -DESCRIPTOR.message_types_by_name["GcRule"] = _GCRULE -DESCRIPTOR.message_types_by_name["Snapshot"] = _SNAPSHOT -DESCRIPTOR.message_types_by_name["Backup"] = _BACKUP -DESCRIPTOR.message_types_by_name["BackupInfo"] = _BACKUPINFO -DESCRIPTOR.enum_types_by_name["RestoreSourceType"] = _RESTORESOURCETYPE -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -RestoreInfo = _reflection.GeneratedProtocolMessageType( - "RestoreInfo", - (_message.Message,), - { - "DESCRIPTOR": _RESTOREINFO, - "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", - "__doc__": """Information about a table restore. - - Attributes: - source_type: - The type of the restore source. - source_info: - Information about the source used to restore the table. - backup_info: - Information about the backup used to restore the table. The - backup may no longer exist. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.RestoreInfo) - }, -) -_sym_db.RegisterMessage(RestoreInfo) - -Table = _reflection.GeneratedProtocolMessageType( - "Table", - (_message.Message,), - { - "ClusterState": _reflection.GeneratedProtocolMessageType( - "ClusterState", - (_message.Message,), - { - "DESCRIPTOR": _TABLE_CLUSTERSTATE, - "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", - "__doc__": """The state of a table’s data in a particular cluster. - - Attributes: - replication_state: - Output only. The state of replication for the table in this - cluster. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table.ClusterState) - }, - ), - "ClusterStatesEntry": _reflection.GeneratedProtocolMessageType( - "ClusterStatesEntry", - (_message.Message,), - { - "DESCRIPTOR": _TABLE_CLUSTERSTATESENTRY, - "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2" - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table.ClusterStatesEntry) - }, - ), - "ColumnFamiliesEntry": _reflection.GeneratedProtocolMessageType( - "ColumnFamiliesEntry", - (_message.Message,), - { - "DESCRIPTOR": _TABLE_COLUMNFAMILIESENTRY, - "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2" - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table.ColumnFamiliesEntry) - }, - ), - "DESCRIPTOR": _TABLE, - "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", - "__doc__": """A collection of user data indexed by row, column, and timestamp. Each - table is served using the resources of its parent cluster. - - Attributes: - name: - Output only. The unique name of the table. Values are of the - form ``projects//instances//tables/[_a- - zA-Z0-9][-_.a-zA-Z0-9]*``. Views: ``NAME_ONLY``, - ``SCHEMA_VIEW``, ``REPLICATION_VIEW``, ``FULL`` - cluster_states: - Output only. Map from cluster ID to per-cluster table state. - If it could not be determined whether or not the table has - data in a particular cluster (for example, if its zone is - unavailable), then there will be an entry for the cluster with - UNKNOWN ``replication_status``. Views: ``REPLICATION_VIEW``, - ``FULL`` - column_families: - (\ ``CreationOnly``) The column families configured for this - table, mapped by column family ID. Views: ``SCHEMA_VIEW``, - ``FULL`` - granularity: - (\ ``CreationOnly``) The granularity (i.e. ``MILLIS``) at - which timestamps are stored in this table. Timestamps not - matching the granularity will be rejected. If unspecified at - creation time, the value will be set to ``MILLIS``. Views: - ``SCHEMA_VIEW``, ``FULL``. - restore_info: - Output only. If this table was restored from another data - source (e.g. a backup), this field will be populated with - information about the restore. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table) - }, -) -_sym_db.RegisterMessage(Table) -_sym_db.RegisterMessage(Table.ClusterState) -_sym_db.RegisterMessage(Table.ClusterStatesEntry) -_sym_db.RegisterMessage(Table.ColumnFamiliesEntry) - -ColumnFamily = _reflection.GeneratedProtocolMessageType( - "ColumnFamily", - (_message.Message,), - { - "DESCRIPTOR": _COLUMNFAMILY, - "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", - "__doc__": """A set of columns within a table which share a common configuration. - - Attributes: - gc_rule: - Garbage collection rule specified as a protobuf. Must - serialize to at most 500 bytes. NOTE: Garbage collection - executes opportunistically in the background, and so it’s - possible for reads to return a cell even if it matches the - active GC expression for its family. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ColumnFamily) - }, -) -_sym_db.RegisterMessage(ColumnFamily) - -GcRule = _reflection.GeneratedProtocolMessageType( - "GcRule", - (_message.Message,), - { - "Intersection": _reflection.GeneratedProtocolMessageType( - "Intersection", - (_message.Message,), - { - "DESCRIPTOR": _GCRULE_INTERSECTION, - "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", - "__doc__": """A GcRule which deletes cells matching all of the given rules. 
- - Attributes: - rules: - Only delete cells which would be deleted by every element of - ``rules``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GcRule.Intersection) - }, - ), - "Union": _reflection.GeneratedProtocolMessageType( - "Union", - (_message.Message,), - { - "DESCRIPTOR": _GCRULE_UNION, - "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", - "__doc__": """A GcRule which deletes cells matching any of the given rules. - - Attributes: - rules: - Delete cells which would be deleted by any element of - ``rules``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GcRule.Union) - }, - ), - "DESCRIPTOR": _GCRULE, - "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", - "__doc__": """Rule for determining which cells to delete during garbage collection. - - Attributes: - rule: - Garbage collection rules. - max_num_versions: - Delete all cells in a column except the most recent N. - max_age: - Delete cells in a column older than the given age. Values must - be at least one millisecond, and will be truncated to - microsecond granularity. - intersection: - Delete cells that would be deleted by every nested rule. - union: - Delete cells that would be deleted by any nested rule. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GcRule) - }, -) -_sym_db.RegisterMessage(GcRule) -_sym_db.RegisterMessage(GcRule.Intersection) -_sym_db.RegisterMessage(GcRule.Union) - -Snapshot = _reflection.GeneratedProtocolMessageType( - "Snapshot", - (_message.Message,), - { - "DESCRIPTOR": _SNAPSHOT, - "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", - "__doc__": """A snapshot of a table at a particular time. A snapshot can be used as - a checkpoint for data restoration or a data source for a new table. - Note: This is a private alpha release of Cloud Bigtable snapshots. - This feature is not currently available to most Cloud Bigtable - customers. This feature might be changed in backward-incompatible ways - and is not recommended for production use. It is not subject to any - SLA or deprecation policy. - - Attributes: - name: - Output only. The unique name of the snapshot. Values are of - the form ``projects//instances//clusters//snapshots/``. - source_table: - Output only. The source table at the time the snapshot was - taken. - data_size_bytes: - Output only. The size of the data in the source table at the - time the snapshot was taken. In some cases, this value may be - computed asynchronously via a background process and a - placeholder of 0 will be used in the meantime. - create_time: - Output only. The time when the snapshot is created. - delete_time: - Output only. The time when the snapshot will be deleted. The - maximum amount of time a snapshot can stay active is 365 days. - If ‘ttl’ is not specified, the default maximum of 365 days - will be used. - state: - Output only. The current state of the snapshot. - description: - Output only. Description of the snapshot. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Snapshot) - }, -) -_sym_db.RegisterMessage(Snapshot) - -Backup = _reflection.GeneratedProtocolMessageType( - "Backup", - (_message.Message,), - { - "DESCRIPTOR": _BACKUP, - "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", - "__doc__": """A backup of a Cloud Bigtable table. - - Attributes: - name: - Output only. A globally unique identifier for the backup which - cannot be changed. 
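The GcRule oneof described above (``max_num_versions``, ``max_age``, ``intersection``, ``union``) maps onto the handwritten helpers in google.cloud.bigtable.column_family, which this PR keeps. A minimal sketch of composing a union rule, assuming ``table`` is an existing google.cloud.bigtable.table.Table in scope:

    import datetime

    from google.cloud.bigtable.column_family import (
        GCRuleUnion,
        MaxAgeGCRule,
        MaxVersionsGCRule,
    )

    # Keep a cell if it is one of the 2 most recent versions OR is
    # younger than 7 days; older cells become eligible for collection.
    gc_rule = GCRuleUnion(rules=[
        MaxVersionsGCRule(2),
        MaxAgeGCRule(datetime.timedelta(days=7)),
    ])

    # column_family() only builds the local object; create() issues the RPC.
    column_family = table.column_family("cf1", gc_rule=gc_rule)
    column_family.create()

Note that, as the ColumnFamily docstring says, collection is opportunistic: reads may still return cells that already match the active rule.
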
Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/ - backups/[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` The final segment of the - name must be between 1 and 50 characters in length. The - backup is stored in the cluster identified by the prefix of - the backup name of the form ``projects/{project}/instances/{in - stance}/clusters/{cluster}``. - source_table: - Required. Immutable. Name of the table from which this backup - was created. This needs to be in the same instance as the - backup. Values are of the form ``projects/{project}/instances/ - {instance}/tables/{source_table}``. - expire_time: - Required. The expiration time of the backup, with microseconds - granularity that must be at least 6 hours and at most 30 days - from the time the request is received. Once the - ``expire_time`` has passed, Cloud Bigtable will delete the - backup and free the resources used by the backup. - start_time: - Output only. ``start_time`` is the time that the backup was - started (i.e. approximately the time the [CreateBackup][google - .bigtable.admin.v2.BigtableTableAdmin.CreateBackup] request is - received). The row data in this backup will be no older than - this timestamp. - end_time: - Output only. ``end_time`` is the time that the backup was - finished. The row data in the backup will be no newer than - this timestamp. - size_bytes: - Output only. Size of the backup in bytes. - state: - Output only. The current state of the backup. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Backup) - }, -) -_sym_db.RegisterMessage(Backup) - -BackupInfo = _reflection.GeneratedProtocolMessageType( - "BackupInfo", - (_message.Message,), - { - "DESCRIPTOR": _BACKUPINFO, - "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", - "__doc__": """Information about a backup. - - Attributes: - backup: - Output only. Name of the backup. - start_time: - Output only. The time that the backup was started. Row data in - the backup will be no older than this timestamp. - end_time: - Output only. This time that the backup was finished. Row data - in the backup will be no newer than this timestamp. - source_table: - Output only. Name of the table the backup was created from. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.BackupInfo) - }, -) -_sym_db.RegisterMessage(BackupInfo) - - -DESCRIPTOR._options = None -_TABLE_CLUSTERSTATESENTRY._options = None -_TABLE_COLUMNFAMILIESENTRY._options = None -_TABLE._options = None -_SNAPSHOT._options = None -_BACKUP.fields_by_name["name"]._options = None -_BACKUP.fields_by_name["source_table"]._options = None -_BACKUP.fields_by_name["expire_time"]._options = None -_BACKUP.fields_by_name["start_time"]._options = None -_BACKUP.fields_by_name["end_time"]._options = None -_BACKUP.fields_by_name["size_bytes"]._options = None -_BACKUP.fields_by_name["state"]._options = None -_BACKUP._options = None -_BACKUPINFO.fields_by_name["backup"]._options = None -_BACKUPINFO.fields_by_name["start_time"]._options = None -_BACKUPINFO.fields_by_name["end_time"]._options = None -_BACKUPINFO.fields_by_name["source_table"]._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/bigtable_admin_v2/proto/table_pb2_grpc.py b/google/cloud/bigtable_admin_v2/proto/table_pb2_grpc.py deleted file mode 100644 index 8a9393943..000000000 --- a/google/cloud/bigtable_admin_v2/proto/table_pb2_grpc.py +++ /dev/null @@ -1,3 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
-"""Client and server classes corresponding to protobuf-defined services.""" -import grpc diff --git a/google/cloud/bigtable_admin_v2/py.typed b/google/cloud/bigtable_admin_v2/py.typed new file mode 100644 index 000000000..bc26f2069 --- /dev/null +++ b/google/cloud/bigtable_admin_v2/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-bigtable-admin package uses inline types. diff --git a/google/cloud/bigtable_admin_v2/services/__init__.py b/google/cloud/bigtable_admin_v2/services/__init__.py new file mode 100644 index 000000000..42ffdf2bc --- /dev/null +++ b/google/cloud/bigtable_admin_v2/services/__init__.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py new file mode 100644 index 000000000..5606dd4ff --- /dev/null +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from .client import BigtableInstanceAdminClient +from .async_client import BigtableInstanceAdminAsyncClient + +__all__ = ( + "BigtableInstanceAdminClient", + "BigtableInstanceAdminAsyncClient", +) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py new file mode 100644 index 000000000..4df47ff4a --- /dev/null +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -0,0 +1,1935 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import pagers +from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin +from google.cloud.bigtable_admin_v2.types import common +from google.cloud.bigtable_admin_v2.types import instance +from google.cloud.bigtable_admin_v2.types import instance as gba_instance +from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore +from google.iam.v1 import policy_pb2 as policy # type: ignore +from google.protobuf import field_mask_pb2 as field_mask # type: ignore + +from .transports.base import BigtableInstanceAdminTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import BigtableInstanceAdminGrpcAsyncIOTransport +from .client import BigtableInstanceAdminClient + + +class BigtableInstanceAdminAsyncClient: + """Service for creating, configuring, and deleting Cloud + Bigtable Instances and Clusters. Provides access to the Instance + and Cluster schemas only, not the tables' metadata or data + stored in those tables. + """ + + _client: BigtableInstanceAdminClient + + DEFAULT_ENDPOINT = BigtableInstanceAdminClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = BigtableInstanceAdminClient.DEFAULT_MTLS_ENDPOINT + + app_profile_path = staticmethod(BigtableInstanceAdminClient.app_profile_path) + parse_app_profile_path = staticmethod( + BigtableInstanceAdminClient.parse_app_profile_path + ) + cluster_path = staticmethod(BigtableInstanceAdminClient.cluster_path) + parse_cluster_path = staticmethod(BigtableInstanceAdminClient.parse_cluster_path) + instance_path = staticmethod(BigtableInstanceAdminClient.instance_path) + parse_instance_path = staticmethod(BigtableInstanceAdminClient.parse_instance_path) + + common_billing_account_path = staticmethod( + BigtableInstanceAdminClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + BigtableInstanceAdminClient.parse_common_billing_account_path + ) + + common_folder_path = staticmethod(BigtableInstanceAdminClient.common_folder_path) + parse_common_folder_path = staticmethod( + BigtableInstanceAdminClient.parse_common_folder_path + ) + + common_organization_path = staticmethod( + BigtableInstanceAdminClient.common_organization_path + ) + parse_common_organization_path = staticmethod( + BigtableInstanceAdminClient.parse_common_organization_path + ) + + common_project_path = staticmethod(BigtableInstanceAdminClient.common_project_path) + parse_common_project_path = staticmethod( + BigtableInstanceAdminClient.parse_common_project_path + ) + + common_location_path = staticmethod( + BigtableInstanceAdminClient.common_location_path + ) + parse_common_location_path = staticmethod( + BigtableInstanceAdminClient.parse_common_location_path + ) + + from_service_account_info = BigtableInstanceAdminClient.from_service_account_info + from_service_account_file = BigtableInstanceAdminClient.from_service_account_file + 
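The resource-path helpers re-exported above are plain classmethods: they format and parse canonical resource names without any network calls. A short sketch, identifiers hypothetical:

    from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
        BigtableInstanceAdminAsyncClient,
    )

    name = BigtableInstanceAdminAsyncClient.instance_path("my-project", "my-instance")
    # -> "projects/my-project/instances/my-instance"
    parts = BigtableInstanceAdminAsyncClient.parse_instance_path(name)
    # -> {"project": "my-project", "instance": "my-instance"}
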
from_service_account_json = from_service_account_file + + @property + def transport(self) -> BigtableInstanceAdminTransport: + """Return the transport used by the client instance. + + Returns: + BigtableInstanceAdminTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial( + type(BigtableInstanceAdminClient).get_transport_class, + type(BigtableInstanceAdminClient), + ) + + def __init__( + self, + *, + credentials: credentials.Credentials = None, + transport: Union[str, BigtableInstanceAdminTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the bigtable instance admin client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.BigtableInstanceAdminTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + + self._client = BigtableInstanceAdminClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def create_instance( + self, + request: bigtable_instance_admin.CreateInstanceRequest = None, + *, + parent: str = None, + instance_id: str = None, + instance: gba_instance.Instance = None, + clusters: Sequence[ + bigtable_instance_admin.CreateInstanceRequest.ClustersEntry + ] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Create an instance within a project. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.CreateInstanceRequest`): + The request object. Request message for + BigtableInstanceAdmin.CreateInstance. + parent (:class:`str`): + Required. The unique name of the project in which to + create the new instance. Values are of the form + ``projects/{project}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_id (:class:`str`): + Required. 
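Putting the constructor docstring above into practice: credentials default to the environment, and an explicit ``api_endpoint`` takes precedence over the mTLS environment variables. A minimal sketch, using the service's default endpoint purely to illustrate the override:

    from google.api_core.client_options import ClientOptions
    from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
        BigtableInstanceAdminAsyncClient,
    )

    # Credentials are picked up from the environment (e.g. ADC).
    client = BigtableInstanceAdminAsyncClient(
        client_options=ClientOptions(api_endpoint="bigtableadmin.googleapis.com"),
    )
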
The ID to be used when referring to the new + instance within its project, e.g., just ``myinstance`` + rather than ``projects/myproject/instances/myinstance``. + + This corresponds to the ``instance_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance (:class:`google.cloud.bigtable_admin_v2.types.Instance`): + Required. The instance to create. Fields marked + ``OutputOnly`` must be left blank. + + This corresponds to the ``instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + clusters (:class:`Sequence[google.cloud.bigtable_admin_v2.types.CreateInstanceRequest.ClustersEntry]`): + Required. The clusters to be created within the + instance, mapped by desired cluster ID, e.g., just + ``mycluster`` rather than + ``projects/myproject/instances/myinstance/clusters/mycluster``. + Fields marked ``OutputOnly`` must be left blank. + Currently, at most four clusters can be specified. + + This corresponds to the ``clusters`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Instance` A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and + the resources that serve them. All tables in an + instance are served from all + [Clusters][google.bigtable.admin.v2.Cluster] in the + instance. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, instance_id, instance, clusters]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_instance_admin.CreateInstanceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if instance_id is not None: + request.instance_id = instance_id + if instance is not None: + request.instance = instance + + if clusters: + request.clusters.update(clusters) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_instance, + default_timeout=300.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gba_instance.Instance, + metadata_type=bigtable_instance_admin.CreateInstanceMetadata, + ) + + # Done; return the response. 
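create_instance illustrates two conventions used throughout these generated clients: the flattened fields are mutually exclusive with ``request=``, and long-running methods return an operation future rather than the resource itself. A hedged end-to-end sketch, all identifiers hypothetical:

    import asyncio

    from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
        BigtableInstanceAdminAsyncClient,
    )
    from google.cloud.bigtable_admin_v2.types import Cluster, Instance

    async def main():
        client = BigtableInstanceAdminAsyncClient()
        # Passing request= alongside any of these would raise ValueError.
        operation = await client.create_instance(
            parent="projects/my-project",
            instance_id="my-instance",
            instance=Instance(display_name="My instance"),
            clusters={
                "my-cluster": Cluster(
                    location="projects/my-project/locations/us-central1-b",
                    serve_nodes=3,
                ),
            },
        )
        instance = await operation.result()  # resolves to an Instance
        print(instance.name)

    asyncio.run(main())
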
+ return response + + async def get_instance( + self, + request: bigtable_instance_admin.GetInstanceRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> instance.Instance: + r"""Gets information about an instance. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.GetInstanceRequest`): + The request object. Request message for + BigtableInstanceAdmin.GetInstance. + name (:class:`str`): + Required. The unique name of the requested instance. + Values are of the form + ``projects/{project}/instances/{instance}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Instance: + A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and + the resources that serve them. All tables in an + instance are served from all + [Clusters][google.bigtable.admin.v2.Cluster] in the + instance. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_instance_admin.GetInstanceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_instance, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_instances( + self, + request: bigtable_instance_admin.ListInstancesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable_instance_admin.ListInstancesResponse: + r"""Lists information about instances in a project. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.ListInstancesRequest`): + The request object. Request message for + BigtableInstanceAdmin.ListInstances. + parent (:class:`str`): + Required. The unique name of the project for which a + list of instances is requested. Values are of the form + ``projects/{project}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
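get_instance above bakes in a retry on DeadlineExceeded/ServiceUnavailable (1s initial backoff, 60s cap, 2x multiplier) and a 60s timeout; the ``retry`` and ``timeout`` arguments override those defaults per call. A minimal sketch, assuming ``client`` is a BigtableInstanceAdminAsyncClient and the call runs inside a coroutine:

    # Tighter per-call deadline; the wrapper's default retry still applies.
    instance = await client.get_instance(
        name="projects/my-project/instances/my-instance",
        timeout=30.0,
    )
    print(instance.display_name)
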
+ + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.ListInstancesResponse: + Response message for + BigtableInstanceAdmin.ListInstances. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_instance_admin.ListInstancesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_instances, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def update_instance( + self, + request: instance.Instance = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> instance.Instance: + r"""Updates an instance within a project. This method + updates only the display name and type for an Instance. + To update other Instance properties, such as labels, use + PartialUpdateInstance. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.Instance`): + The request object. A collection of Bigtable + [Tables][google.bigtable.admin.v2.Table] and the + resources that serve them. All tables in an instance are + served from all + [Clusters][google.bigtable.admin.v2.Cluster] in the + instance. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Instance: + A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and + the resources that serve them. All tables in an + instance are served from all + [Clusters][google.bigtable.admin.v2.Cluster] in the + instance. + + """ + # Create or coerce a protobuf request object. + + request = instance.Instance(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_instance, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def partial_update_instance( + self, + request: bigtable_instance_admin.PartialUpdateInstanceRequest = None, + *, + instance: gba_instance.Instance = None, + update_mask: field_mask.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Partially updates an instance within a project. This + method can modify all fields of an Instance and is the + preferred way to update an Instance. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.PartialUpdateInstanceRequest`): + The request object. Request message for + BigtableInstanceAdmin.PartialUpdateInstance. + instance (:class:`google.cloud.bigtable_admin_v2.types.Instance`): + Required. The Instance which will + (partially) replace the current value. + + This corresponds to the ``instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. The subset of Instance + fields which should be replaced. Must be + explicitly set. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Instance` A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and + the resources that serve them. All tables in an + instance are served from all + [Clusters][google.bigtable.admin.v2.Cluster] in the + instance. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([instance, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_instance_admin.PartialUpdateInstanceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if instance is not None: + request.instance = instance + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.partial_update_instance, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("instance.name", request.instance.name),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gba_instance.Instance, + metadata_type=bigtable_instance_admin.UpdateInstanceMetadata, + ) + + # Done; return the response. + return response + + async def delete_instance( + self, + request: bigtable_instance_admin.DeleteInstanceRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Delete an instance from a project. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.DeleteInstanceRequest`): + The request object. Request message for + BigtableInstanceAdmin.DeleteInstance. + name (:class:`str`): + Required. The unique name of the instance to be deleted. + Values are of the form + ``projects/{project}/instances/{instance}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_instance_admin.DeleteInstanceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_instance, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + await rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + async def create_cluster( + self, + request: bigtable_instance_admin.CreateClusterRequest = None, + *, + parent: str = None, + cluster_id: str = None, + cluster: instance.Cluster = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a cluster within an instance. 
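As the partial_update_instance docstring above states, ``update_mask`` must explicitly name the Instance fields to replace; unlisted fields are left untouched. A minimal sketch, under the same ``client``-in-a-coroutine assumption as the previous snippet:

    from google.protobuf import field_mask_pb2
    from google.cloud.bigtable_admin_v2.types import Instance

    operation = await client.partial_update_instance(
        instance=Instance(
            name="projects/my-project/instances/my-instance",
            display_name="Renamed instance",
        ),
        # Only the paths listed here are written.
        update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
    )
    updated = await operation.result()
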
+ + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.CreateClusterRequest`): + The request object. Request message for + BigtableInstanceAdmin.CreateCluster. + parent (:class:`str`): + Required. The unique name of the instance in which to + create the new cluster. Values are of the form + ``projects/{project}/instances/{instance}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. The ID to be used when referring to the new + cluster within its instance, e.g., just ``mycluster`` + rather than + ``projects/myproject/instances/myinstance/clusters/mycluster``. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster (:class:`google.cloud.bigtable_admin_v2.types.Cluster`): + Required. The cluster to be created. Fields marked + ``OutputOnly`` must be left blank. + + This corresponds to the ``cluster`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Cluster` A resizable group of nodes in a particular cloud location, capable + of serving all + [Tables][google.bigtable.admin.v2.Table] in the + parent [Instance][google.bigtable.admin.v2.Instance]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, cluster_id, cluster]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_instance_admin.CreateClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if cluster_id is not None: + request.cluster_id = cluster_id + if cluster is not None: + request.cluster = cluster + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_cluster, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + instance.Cluster, + metadata_type=bigtable_instance_admin.CreateClusterMetadata, + ) + + # Done; return the response. 
+ return response + + async def get_cluster( + self, + request: bigtable_instance_admin.GetClusterRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> instance.Cluster: + r"""Gets information about a cluster. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.GetClusterRequest`): + The request object. Request message for + BigtableInstanceAdmin.GetCluster. + name (:class:`str`): + Required. The unique name of the requested cluster. + Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Cluster: + A resizable group of nodes in a particular cloud location, capable + of serving all + [Tables][google.bigtable.admin.v2.Table] in the + parent [Instance][google.bigtable.admin.v2.Instance]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_instance_admin.GetClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_cluster, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_clusters( + self, + request: bigtable_instance_admin.ListClustersRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable_instance_admin.ListClustersResponse: + r"""Lists information about clusters in an instance. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.ListClustersRequest`): + The request object. Request message for + BigtableInstanceAdmin.ListClusters. + parent (:class:`str`): + Required. The unique name of the instance for which a + list of clusters is requested. Values are of the form + ``projects/{project}/instances/{instance}``. Use + ``{instance} = '-'`` to list Clusters for all Instances + in a project, e.g., ``projects/myproject/instances/-``. 
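The ``{instance} = '-'`` wildcard documented above lists clusters across every instance in the project, and the response's ``failed_locations`` field reports zones that could not be reached. A short sketch under the same assumptions as the previous snippets:

    response = await client.list_clusters(
        parent="projects/my-project/instances/-",
    )
    for cluster in response.clusters:
        print(cluster.name, cluster.serve_nodes)
    if response.failed_locations:
        print("unreachable:", response.failed_locations)
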
+ + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.ListClustersResponse: + Response message for + BigtableInstanceAdmin.ListClusters. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_instance_admin.ListClustersRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_clusters, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def update_cluster( + self, + request: instance.Cluster = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Updates a cluster within an instance. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.Cluster`): + The request object. A resizable group of nodes in a + particular cloud location, capable of serving all + [Tables][google.bigtable.admin.v2.Table] in the parent + [Instance][google.bigtable.admin.v2.Instance]. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Cluster` A resizable group of nodes in a particular cloud location, capable + of serving all + [Tables][google.bigtable.admin.v2.Table] in the + parent [Instance][google.bigtable.admin.v2.Instance]. + + """ + # Create or coerce a protobuf request object. + + request = instance.Cluster(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
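Since `list_clusters` is not paginated, its response carries the clusters and any unreachable locations directly; a sketch using the `instances/-` wildcard described above (project name hypothetical):

```python
from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
    BigtableInstanceAdminAsyncClient,
)


async def list_all_clusters():
    client = BigtableInstanceAdminAsyncClient()
    # "-" lists clusters across every instance in the project.
    response = await client.list_clusters(
        parent="projects/my-project/instances/-"
    )
    for cluster in response.clusters:
        print(cluster.name, cluster.serve_nodes)
    # Locations that could not be reached are reported in the response
    # rather than failing the whole call.
    for location in response.failed_locations:
        print("unreachable:", location)
```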
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_cluster, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + instance.Cluster, + metadata_type=bigtable_instance_admin.UpdateClusterMetadata, + ) + + # Done; return the response. + return response + + async def delete_cluster( + self, + request: bigtable_instance_admin.DeleteClusterRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a cluster from an instance. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.DeleteClusterRequest`): + The request object. Request message for + BigtableInstanceAdmin.DeleteCluster. + name (:class:`str`): + Required. The unique name of the cluster to be deleted. + Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_instance_admin.DeleteClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_cluster, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + await rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + async def create_app_profile( + self, + request: bigtable_instance_admin.CreateAppProfileRequest = None, + *, + parent: str = None, + app_profile_id: str = None, + app_profile: instance.AppProfile = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> instance.AppProfile: + r"""Creates an app profile within an instance. 
+ + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.CreateAppProfileRequest`): + The request object. Request message for + BigtableInstanceAdmin.CreateAppProfile. + parent (:class:`str`): + Required. The unique name of the instance in which to + create the new app profile. Values are of the form + ``projects/{project}/instances/{instance}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (:class:`str`): + Required. The ID to be used when referring to the new + app profile within its instance, e.g., just + ``myprofile`` rather than + ``projects/myproject/instances/myinstance/appProfiles/myprofile``. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile (:class:`google.cloud.bigtable_admin_v2.types.AppProfile`): + Required. The app profile to be created. Fields marked + ``OutputOnly`` will be ignored. + + This corresponds to the ``app_profile`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.AppProfile: + A configuration object describing how + Cloud Bigtable should treat traffic from + a particular end user application. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, app_profile_id, app_profile]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_instance_admin.CreateAppProfileRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if app_profile_id is not None: + request.app_profile_id = app_profile_id + if app_profile is not None: + request.app_profile = app_profile + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_app_profile, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_app_profile( + self, + request: bigtable_instance_admin.GetAppProfileRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> instance.AppProfile: + r"""Gets information about an app profile. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.GetAppProfileRequest`): + The request object. Request message for + BigtableInstanceAdmin.GetAppProfile. + name (:class:`str`): + Required. 
The unique name of the requested app profile. + Values are of the form + ``projects/{project}/instances/{instance}/appProfiles/{app_profile}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.AppProfile: + A configuration object describing how + Cloud Bigtable should treat traffic from + a particular end user application. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_instance_admin.GetAppProfileRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_app_profile, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_app_profiles( + self, + request: bigtable_instance_admin.ListAppProfilesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListAppProfilesAsyncPager: + r"""Lists information about app profiles in an instance. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.ListAppProfilesRequest`): + The request object. Request message for + BigtableInstanceAdmin.ListAppProfiles. + parent (:class:`str`): + Required. The unique name of the instance for which a + list of app profiles is requested. Values are of the + form ``projects/{project}/instances/{instance}``. Use + ``{instance} = '-'`` to list AppProfiles for all + Instances in a project, e.g., + ``projects/myproject/instances/-``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListAppProfilesAsyncPager: + Response message for + BigtableInstanceAdmin.ListAppProfiles. 
+ Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_instance_admin.ListAppProfilesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_app_profiles, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListAppProfilesAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_app_profile( + self, + request: bigtable_instance_admin.UpdateAppProfileRequest = None, + *, + app_profile: instance.AppProfile = None, + update_mask: field_mask.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Updates an app profile within an instance. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.UpdateAppProfileRequest`): + The request object. Request message for + BigtableInstanceAdmin.UpdateAppProfile. + app_profile (:class:`google.cloud.bigtable_admin_v2.types.AppProfile`): + Required. The app profile which will + (partially) replace the current value. + + This corresponds to the ``app_profile`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. The subset of app profile + fields which should be replaced. If + unset, all fields will be replaced. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.AppProfile` A configuration object describing how Cloud Bigtable should treat traffic + from a particular end user application. 
+ + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([app_profile, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_instance_admin.UpdateAppProfileRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if app_profile is not None: + request.app_profile = app_profile + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_app_profile, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("app_profile.name", request.app_profile.name),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + instance.AppProfile, + metadata_type=bigtable_instance_admin.UpdateAppProfileMetadata, + ) + + # Done; return the response. + return response + + async def delete_app_profile( + self, + request: bigtable_instance_admin.DeleteAppProfileRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes an app profile from an instance. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.DeleteAppProfileRequest`): + The request object. Request message for + BigtableInstanceAdmin.DeleteAppProfile. + name (:class:`str`): + Required. The unique name of the app profile to be + deleted. Values are of the form + ``projects/{project}/instances/{instance}/appProfiles/{app_profile}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_instance_admin.DeleteAppProfileRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_app_profile, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + await rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + async def get_iam_policy( + self, + request: iam_policy.GetIamPolicyRequest = None, + *, + resource: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy.Policy: + r"""Gets the access control policy for an instance + resource. Returns an empty policy if an instance exists + but does not have a policy set. + + Args: + request (:class:`google.iam.v1.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + resource (:class:`str`): + REQUIRED: The resource for which the + policy is being requested. See the + operation documentation for the + appropriate value for this field. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.iam.v1.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. It is used to + specify access control policies for Cloud Platform + resources. + + A Policy is a collection of bindings. A binding binds + one or more members to a single role. Members can be + user accounts, service accounts, Google groups, and + domains (such as G Suite). A role is a named list of + permissions (defined by IAM or configured by users). + A binding can optionally specify a condition, which + is a logic expression that further constrains the + role binding based on attributes about the request + and/or target resource. + + **JSON Example** + + { + "bindings": [ + { + "role": + "roles/resourcemanager.organizationAdmin", + "members": [ "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + + }, { "role": + "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { "title": "expirable access", + "description": "Does not grant access after + Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } + + ] + + } + + **YAML Example** + + bindings: - members: - user:\ mike@example.com - + group:\ admins@example.com - domain:google.com - + serviceAccount:\ my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - + members: - user:\ eve@example.com role: + roles/resourcemanager.organizationViewer + condition: title: expirable access description: + Does not grant access after Sep 2020 expression: + request.time < + timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the + [IAM developer's + guide](\ https://cloud.google.com/iam/docs). + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
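Several of the methods above install a default exponential retry on `DeadlineExceeded` and `ServiceUnavailable`; a caller can substitute its own policy per request. A sketch under those assumptions (the budget numbers are illustrative, not defaults):

```python
from google.api_core import exceptions
from google.api_core import retry as retries

custom_retry = retries.Retry(
    initial=0.5,  # first backoff, in seconds
    maximum=10.0,  # cap on any single backoff
    multiplier=2,  # exponential growth factor
    deadline=30.0,  # overall retry budget
    predicate=retries.if_exception_type(
        exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
    ),
)

# Passed per call, this overrides the wrapped method's default policy:
#     await client.get_cluster(name=..., retry=custom_retry, timeout=30.0)
```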
+ has_flattened_params = any([resource]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy.GetIamPolicyRequest(**request) + + elif not request: + request = iam_policy.GetIamPolicyRequest(resource=resource,) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_iam_policy, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_iam_policy( + self, + request: iam_policy.SetIamPolicyRequest = None, + *, + resource: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy.Policy: + r"""Sets the access control policy on an instance + resource. Replaces any existing policy. + + Args: + request (:class:`google.iam.v1.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + resource (:class:`str`): + REQUIRED: The resource for which the + policy is being specified. See the + operation documentation for the + appropriate value for this field. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.iam.v1.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. It is used to + specify access control policies for Cloud Platform + resources. + + A Policy is a collection of bindings. A binding binds + one or more members to a single role. Members can be + user accounts, service accounts, Google groups, and + domains (such as G Suite). A role is a named list of + permissions (defined by IAM or configured by users). + A binding can optionally specify a condition, which + is a logic expression that further constrains the + role binding based on attributes about the request + and/or target resource. 
+ + **JSON Example** + + { + "bindings": [ + { + "role": + "roles/resourcemanager.organizationAdmin", + "members": [ "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + + }, { "role": + "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { "title": "expirable access", + "description": "Does not grant access after + Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } + + ] + + } + + **YAML Example** + + bindings: - members: - user:\ mike@example.com - + group:\ admins@example.com - domain:google.com - + serviceAccount:\ my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - + members: - user:\ eve@example.com role: + roles/resourcemanager.organizationViewer + condition: title: expirable access description: + Does not grant access after Sep 2020 expression: + request.time < + timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the + [IAM developer's + guide](\ https://cloud.google.com/iam/docs). + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([resource]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy.SetIamPolicyRequest(**request) + + elif not request: + request = iam_policy.SetIamPolicyRequest(resource=resource,) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_iam_policy, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def test_iam_permissions( + self, + request: iam_policy.TestIamPermissionsRequest = None, + *, + resource: str = None, + permissions: Sequence[str] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy.TestIamPermissionsResponse: + r"""Returns permissions that the caller has on the + specified instance resource. + + Args: + request (:class:`google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + resource (:class:`str`): + REQUIRED: The resource for which the + policy detail is being requested. See + the operation documentation for the + appropriate value for this field. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + permissions (:class:`Sequence[str]`): + The set of permissions to check for the ``resource``. + Permissions with wildcards (such as '*' or 'storage.*') + are not allowed. For more information see `IAM + Overview <https://cloud.google.com/iam/docs/overview#permissions>`__.
+ + This corresponds to the ``permissions`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse: + Response message for TestIamPermissions method. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([resource, permissions]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy.TestIamPermissionsRequest(**request) + + elif not request: + request = iam_policy.TestIamPermissionsRequest( + resource=resource, permissions=permissions, + ) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.test_iam_permissions, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-bigtable-admin", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("BigtableInstanceAdminAsyncClient",) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py new file mode 100644 index 000000000..8e6f504da --- /dev/null +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -0,0 +1,2069 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
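The three IAM methods above compose into the usual read-modify-write cycle; a sketch against an instance resource (the member, role, and permission strings are hypothetical, Application Default Credentials assumed):

```python
from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
    BigtableInstanceAdminAsyncClient,
)


async def grant_reader(instance_name: str):
    client = BigtableInstanceAdminAsyncClient()
    policy = await client.get_iam_policy(resource=instance_name)
    # The policy is a plain protobuf (google.iam.v1.policy_pb2.Policy),
    # so bindings are edited in place before writing it back.
    policy.bindings.add(
        role="roles/bigtable.reader", members=["user:eve@example.com"],
    )
    # set_iam_policy replaces the existing policy wholesale; the policy
    # travels in the request dict because only `resource` is flattened.
    updated = await client.set_iam_policy(
        request={"resource": instance_name, "policy": policy}
    )
    response = await client.test_iam_permissions(
        resource=instance_name, permissions=["bigtable.tables.readRows"],
    )
    return updated, response.permissions
```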
+# + +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import pagers +from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin +from google.cloud.bigtable_admin_v2.types import common +from google.cloud.bigtable_admin_v2.types import instance +from google.cloud.bigtable_admin_v2.types import instance as gba_instance +from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore +from google.iam.v1 import policy_pb2 as policy # type: ignore +from google.protobuf import field_mask_pb2 as field_mask # type: ignore + +from .transports.base import BigtableInstanceAdminTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import BigtableInstanceAdminGrpcTransport +from .transports.grpc_asyncio import BigtableInstanceAdminGrpcAsyncIOTransport + + +class BigtableInstanceAdminClientMeta(type): + """Metaclass for the BigtableInstanceAdmin client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + + _transport_registry = ( + OrderedDict() + ) # type: Dict[str, Type[BigtableInstanceAdminTransport]] + _transport_registry["grpc"] = BigtableInstanceAdminGrpcTransport + _transport_registry["grpc_asyncio"] = BigtableInstanceAdminGrpcAsyncIOTransport + + def get_transport_class( + cls, label: str = None, + ) -> Type[BigtableInstanceAdminTransport]: + """Return an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class BigtableInstanceAdminClient(metaclass=BigtableInstanceAdminClientMeta): + """Service for creating, configuring, and deleting Cloud + Bigtable Instances and Clusters. Provides access to the Instance + and Cluster schemas only, not the tables' metadata or data + stored in those tables. + """ + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Convert api endpoint to mTLS endpoint. + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. 
+ """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "bigtableadmin.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + BigtableInstanceAdminClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + BigtableInstanceAdminClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> BigtableInstanceAdminTransport: + """Return the transport used by the client instance. + + Returns: + BigtableInstanceAdminTransport: The transport used by the client instance. 
+ """ + return self._transport + + @staticmethod + def app_profile_path(project: str, instance: str, app_profile: str,) -> str: + """Return a fully-qualified app_profile string.""" + return "projects/{project}/instances/{instance}/appProfiles/{app_profile}".format( + project=project, instance=instance, app_profile=app_profile, + ) + + @staticmethod + def parse_app_profile_path(path: str) -> Dict[str, str]: + """Parse a app_profile path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/instances/(?P.+?)/appProfiles/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def cluster_path(project: str, instance: str, cluster: str,) -> str: + """Return a fully-qualified cluster string.""" + return "projects/{project}/instances/{instance}/clusters/{cluster}".format( + project=project, instance=instance, cluster=cluster, + ) + + @staticmethod + def parse_cluster_path(path: str) -> Dict[str, str]: + """Parse a cluster path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/instances/(?P.+?)/clusters/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def instance_path(project: str, instance: str,) -> str: + """Return a fully-qualified instance string.""" + return "projects/{project}/instances/{instance}".format( + project=project, instance=instance, + ) + + @staticmethod + def parse_instance_path(path: str) -> Dict[str, str]: + """Parse a instance path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/instances/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str,) -> str: + """Return a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str, str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str,) -> str: + """Return a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder,) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str, str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str,) -> str: + """Return a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization,) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str, str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str,) -> str: + """Return a fully-qualified project string.""" + return "projects/{project}".format(project=project,) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str, str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str,) -> str: + """Return a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format( + project=project, location=location, + ) + + @staticmethod + def 
parse_common_location_path(path: str) -> Dict[str, str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path) + return m.groupdict() if m else {} + + def __init__( + self, + *, + credentials: Optional[credentials.Credentials] = None, + transport: Union[str, BigtableInstanceAdminTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the bigtable instance admin client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, BigtableInstanceAdminTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + use_client_cert = bool( + util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) + ) + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + client_cert_source_func = ( + mtls.default_client_cert_source() if is_mtls else None + ) + + # Figure out which api endpoint to use.
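The path helpers above are pure string utilities, usable without a client instance; a round-trip sketch with hypothetical identifiers, showing how the named regex groups drive `groupdict()`:

```python
from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
    BigtableInstanceAdminClient,
)

name = BigtableInstanceAdminClient.cluster_path(
    "my-project", "my-instance", "my-cluster"
)
assert name == "projects/my-project/instances/my-instance/clusters/my-cluster"

# parse_cluster_path inverts cluster_path via the named groups.
assert BigtableInstanceAdminClient.parse_cluster_path(name) == {
    "project": "my-project",
    "instance": "my-instance",
    "cluster": "my-cluster",
}
```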
+ if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + api_endpoint = ( + self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT + ) + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, BigtableInstanceAdminTransport): + # transport is a BigtableInstanceAdminTransport instance. + if credentials or client_options.credentials_file: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, " + "provide its scopes directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def create_instance( + self, + request: bigtable_instance_admin.CreateInstanceRequest = None, + *, + parent: str = None, + instance_id: str = None, + instance: gba_instance.Instance = None, + clusters: Sequence[ + bigtable_instance_admin.CreateInstanceRequest.ClustersEntry + ] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Create an instance within a project. + + Args: + request (google.cloud.bigtable_admin_v2.types.CreateInstanceRequest): + The request object. Request message for + BigtableInstanceAdmin.CreateInstance. + parent (str): + Required. The unique name of the project in which to + create the new instance. Values are of the form + ``projects/{project}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_id (str): + Required. The ID to be used when referring to the new + instance within its project, e.g., just ``myinstance`` + rather than ``projects/myproject/instances/myinstance``. + + This corresponds to the ``instance_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance (google.cloud.bigtable_admin_v2.types.Instance): + Required. The instance to create. Fields marked + ``OutputOnly`` must be left blank. + + This corresponds to the ``instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + clusters (Sequence[google.cloud.bigtable_admin_v2.types.CreateInstanceRequest.ClustersEntry]): + Required. The clusters to be created within the + instance, mapped by desired cluster ID, e.g., just + ``mycluster`` rather than + ``projects/myproject/instances/myinstance/clusters/mycluster``. + Fields marked ``OutputOnly`` must be left blank. + Currently, at most four clusters can be specified. 
+ + This corresponds to the ``clusters`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Instance` A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and + the resources that serve them. All tables in an + instance are served from all + [Clusters][google.bigtable.admin.v2.Cluster] in the + instance. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, instance_id, instance, clusters]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_instance_admin.CreateInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_instance_admin.CreateInstanceRequest): + request = bigtable_instance_admin.CreateInstanceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if instance_id is not None: + request.instance_id = instance_id + if instance is not None: + request.instance = instance + + if clusters: + request.clusters.update(clusters) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_instance] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + gba_instance.Instance, + metadata_type=bigtable_instance_admin.CreateInstanceMetadata, + ) + + # Done; return the response. + return response + + def get_instance( + self, + request: bigtable_instance_admin.GetInstanceRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> instance.Instance: + r"""Gets information about an instance. + + Args: + request (google.cloud.bigtable_admin_v2.types.GetInstanceRequest): + The request object. Request message for + BigtableInstanceAdmin.GetInstance. + name (str): + Required. The unique name of the requested instance. + Values are of the form + ``projects/{project}/instances/{instance}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Instance: + A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and + the resources that serve them. All tables in an + instance are served from all + [Clusters][google.bigtable.admin.v2.Cluster] in the + instance. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_instance_admin.GetInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_instance_admin.GetInstanceRequest): + request = bigtable_instance_admin.GetInstanceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_instance] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_instances( + self, + request: bigtable_instance_admin.ListInstancesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable_instance_admin.ListInstancesResponse: + r"""Lists information about instances in a project. + + Args: + request (google.cloud.bigtable_admin_v2.types.ListInstancesRequest): + The request object. Request message for + BigtableInstanceAdmin.ListInstances. + parent (str): + Required. The unique name of the project for which a + list of instances is requested. Values are of the form + ``projects/{project}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.ListInstancesResponse: + Response message for + BigtableInstanceAdmin.ListInstances. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_instance_admin.ListInstancesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_instance_admin.ListInstancesRequest): + request = bigtable_instance_admin.ListInstancesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_instances] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def update_instance( + self, + request: instance.Instance = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> instance.Instance: + r"""Updates an instance within a project. This method + updates only the display name and type for an Instance. + To update other Instance properties, such as labels, use + PartialUpdateInstance. + + Args: + request (google.cloud.bigtable_admin_v2.types.Instance): + The request object. A collection of Bigtable + [Tables][google.bigtable.admin.v2.Table] and the + resources that serve them. All tables in an instance are + served from all + [Clusters][google.bigtable.admin.v2.Cluster] in the + instance. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Instance: + A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and + the resources that serve them. All tables in an + instance are served from all + [Clusters][google.bigtable.admin.v2.Cluster] in the + instance. + + """ + # Create or coerce a protobuf request object. + + # Minor optimization to avoid making a copy if the user passes + # in a instance.Instance. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, instance.Instance): + request = instance.Instance(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_instance] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def partial_update_instance( + self, + request: bigtable_instance_admin.PartialUpdateInstanceRequest = None, + *, + instance: gba_instance.Instance = None, + update_mask: field_mask.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Partially updates an instance within a project. This + method can modify all fields of an Instance and is the + preferred way to update an Instance. + + Args: + request (google.cloud.bigtable_admin_v2.types.PartialUpdateInstanceRequest): + The request object. Request message for + BigtableInstanceAdmin.PartialUpdateInstance. + instance (google.cloud.bigtable_admin_v2.types.Instance): + Required. The Instance which will + (partially) replace the current value. + + This corresponds to the ``instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The subset of Instance + fields which should be replaced. Must be + explicitly set. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Instance` A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and + the resources that serve them. All tables in an + instance are served from all + [Clusters][google.bigtable.admin.v2.Cluster] in the + instance. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([instance, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_instance_admin.PartialUpdateInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, bigtable_instance_admin.PartialUpdateInstanceRequest + ): + request = bigtable_instance_admin.PartialUpdateInstanceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if instance is not None: + request.instance = instance + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.partial_update_instance] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("instance.name", request.instance.name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. 
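+        # from_gapic wraps the raw google.longrunning.Operation in an
+        # api_core Operation future. A hedged usage sketch (names such as
+        # `client`, `my_instance`, and `mask` are hypothetical):
+        #
+        #     op = client.partial_update_instance(
+        #         instance=my_instance, update_mask=mask,
+        #     )
+        #     updated = op.result()  # blocks until the LRO completes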
+ response = operation.from_gapic( + response, + self._transport.operations_client, + gba_instance.Instance, + metadata_type=bigtable_instance_admin.UpdateInstanceMetadata, + ) + + # Done; return the response. + return response + + def delete_instance( + self, + request: bigtable_instance_admin.DeleteInstanceRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Delete an instance from a project. + + Args: + request (google.cloud.bigtable_admin_v2.types.DeleteInstanceRequest): + The request object. Request message for + BigtableInstanceAdmin.DeleteInstance. + name (str): + Required. The unique name of the instance to be deleted. + Values are of the form + ``projects/{project}/instances/{instance}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_instance_admin.DeleteInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_instance_admin.DeleteInstanceRequest): + request = bigtable_instance_admin.DeleteInstanceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_instance] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + def create_cluster( + self, + request: bigtable_instance_admin.CreateClusterRequest = None, + *, + parent: str = None, + cluster_id: str = None, + cluster: instance.Cluster = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Creates a cluster within an instance. + + Args: + request (google.cloud.bigtable_admin_v2.types.CreateClusterRequest): + The request object. Request message for + BigtableInstanceAdmin.CreateCluster. + parent (str): + Required. The unique name of the instance in which to + create the new cluster. Values are of the form + ``projects/{project}/instances/{instance}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (str): + Required. 
The ID to be used when referring to the new + cluster within its instance, e.g., just ``mycluster`` + rather than + ``projects/myproject/instances/myinstance/clusters/mycluster``. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster (google.cloud.bigtable_admin_v2.types.Cluster): + Required. The cluster to be created. Fields marked + ``OutputOnly`` must be left blank. + + This corresponds to the ``cluster`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Cluster` A resizable group of nodes in a particular cloud location, capable + of serving all + [Tables][google.bigtable.admin.v2.Table] in the + parent [Instance][google.bigtable.admin.v2.Instance]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, cluster_id, cluster]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_instance_admin.CreateClusterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_instance_admin.CreateClusterRequest): + request = bigtable_instance_admin.CreateClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if cluster_id is not None: + request.cluster_id = cluster_id + if cluster is not None: + request.cluster = cluster + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_cluster] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + instance.Cluster, + metadata_type=bigtable_instance_admin.CreateClusterMetadata, + ) + + # Done; return the response. + return response + + def get_cluster( + self, + request: bigtable_instance_admin.GetClusterRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> instance.Cluster: + r"""Gets information about a cluster. + + Args: + request (google.cloud.bigtable_admin_v2.types.GetClusterRequest): + The request object. Request message for + BigtableInstanceAdmin.GetCluster. + name (str): + Required. The unique name of the requested cluster. 
+ Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Cluster: + A resizable group of nodes in a particular cloud location, capable + of serving all + [Tables][google.bigtable.admin.v2.Table] in the + parent [Instance][google.bigtable.admin.v2.Instance]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_instance_admin.GetClusterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_instance_admin.GetClusterRequest): + request = bigtable_instance_admin.GetClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_cluster] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_clusters( + self, + request: bigtable_instance_admin.ListClustersRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable_instance_admin.ListClustersResponse: + r"""Lists information about clusters in an instance. + + Args: + request (google.cloud.bigtable_admin_v2.types.ListClustersRequest): + The request object. Request message for + BigtableInstanceAdmin.ListClusters. + parent (str): + Required. The unique name of the instance for which a + list of clusters is requested. Values are of the form + ``projects/{project}/instances/{instance}``. Use + ``{instance} = '-'`` to list Clusters for all Instances + in a project, e.g., ``projects/myproject/instances/-``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.ListClustersResponse: + Response message for + BigtableInstanceAdmin.ListClusters. + + """ + # Create or coerce a protobuf request object. 
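+        # Coercion accepts three inputs: an existing ListClustersRequest
+        # (used as-is), a dict (converted by the proto-plus constructor),
+        # or None (an empty request populated from the flattened args below).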
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_instance_admin.ListClustersRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_instance_admin.ListClustersRequest): + request = bigtable_instance_admin.ListClustersRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_clusters] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def update_cluster( + self, + request: instance.Cluster = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Updates a cluster within an instance. + + Args: + request (google.cloud.bigtable_admin_v2.types.Cluster): + The request object. A resizable group of nodes in a + particular cloud location, capable of serving all + [Tables][google.bigtable.admin.v2.Table] in the parent + [Instance][google.bigtable.admin.v2.Instance]. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Cluster` A resizable group of nodes in a particular cloud location, capable + of serving all + [Tables][google.bigtable.admin.v2.Table] in the + parent [Instance][google.bigtable.admin.v2.Instance]. + + """ + # Create or coerce a protobuf request object. + + # Minor optimization to avoid making a copy if the user passes + # in a instance.Cluster. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, instance.Cluster): + request = instance.Cluster(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_cluster] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. 
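+        # As with the other long-running methods, calling result() on the
+        # wrapped future blocks until the UpdateCluster operation finishes
+        # and then yields the updated instance.Cluster message.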
+ response = operation.from_gapic( + response, + self._transport.operations_client, + instance.Cluster, + metadata_type=bigtable_instance_admin.UpdateClusterMetadata, + ) + + # Done; return the response. + return response + + def delete_cluster( + self, + request: bigtable_instance_admin.DeleteClusterRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a cluster from an instance. + + Args: + request (google.cloud.bigtable_admin_v2.types.DeleteClusterRequest): + The request object. Request message for + BigtableInstanceAdmin.DeleteCluster. + name (str): + Required. The unique name of the cluster to be deleted. + Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_instance_admin.DeleteClusterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_instance_admin.DeleteClusterRequest): + request = bigtable_instance_admin.DeleteClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_cluster] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + def create_app_profile( + self, + request: bigtable_instance_admin.CreateAppProfileRequest = None, + *, + parent: str = None, + app_profile_id: str = None, + app_profile: instance.AppProfile = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> instance.AppProfile: + r"""Creates an app profile within an instance. + + Args: + request (google.cloud.bigtable_admin_v2.types.CreateAppProfileRequest): + The request object. Request message for + BigtableInstanceAdmin.CreateAppProfile. + parent (str): + Required. The unique name of the instance in which to + create the new app profile. Values are of the form + ``projects/{project}/instances/{instance}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (str): + Required. 
The ID to be used when referring to the new + app profile within its instance, e.g., just + ``myprofile`` rather than + ``projects/myproject/instances/myinstance/appProfiles/myprofile``. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile (google.cloud.bigtable_admin_v2.types.AppProfile): + Required. The app profile to be created. Fields marked + ``OutputOnly`` will be ignored. + + This corresponds to the ``app_profile`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.AppProfile: + A configuration object describing how + Cloud Bigtable should treat traffic from + a particular end user application. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, app_profile_id, app_profile]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_instance_admin.CreateAppProfileRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_instance_admin.CreateAppProfileRequest): + request = bigtable_instance_admin.CreateAppProfileRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if app_profile_id is not None: + request.app_profile_id = app_profile_id + if app_profile is not None: + request.app_profile = app_profile + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_app_profile] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_app_profile( + self, + request: bigtable_instance_admin.GetAppProfileRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> instance.AppProfile: + r"""Gets information about an app profile. + + Args: + request (google.cloud.bigtable_admin_v2.types.GetAppProfileRequest): + The request object. Request message for + BigtableInstanceAdmin.GetAppProfile. + name (str): + Required. The unique name of the requested app profile. + Values are of the form + ``projects/{project}/instances/{instance}/appProfiles/{app_profile}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.AppProfile: + A configuration object describing how + Cloud Bigtable should treat traffic from + a particular end user application. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_instance_admin.GetAppProfileRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_instance_admin.GetAppProfileRequest): + request = bigtable_instance_admin.GetAppProfileRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_app_profile] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_app_profiles( + self, + request: bigtable_instance_admin.ListAppProfilesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListAppProfilesPager: + r"""Lists information about app profiles in an instance. + + Args: + request (google.cloud.bigtable_admin_v2.types.ListAppProfilesRequest): + The request object. Request message for + BigtableInstanceAdmin.ListAppProfiles. + parent (str): + Required. The unique name of the instance for which a + list of app profiles is requested. Values are of the + form ``projects/{project}/instances/{instance}``. Use + ``{instance} = '-'`` to list AppProfiles for all + Instances in a project, e.g., + ``projects/myproject/instances/-``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListAppProfilesPager: + Response message for + BigtableInstanceAdmin.ListAppProfiles. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_instance_admin.ListAppProfilesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_instance_admin.ListAppProfilesRequest): + request = bigtable_instance_admin.ListAppProfilesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_app_profiles] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListAppProfilesPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def update_app_profile( + self, + request: bigtable_instance_admin.UpdateAppProfileRequest = None, + *, + app_profile: instance.AppProfile = None, + update_mask: field_mask.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Updates an app profile within an instance. + + Args: + request (google.cloud.bigtable_admin_v2.types.UpdateAppProfileRequest): + The request object. Request message for + BigtableInstanceAdmin.UpdateAppProfile. + app_profile (google.cloud.bigtable_admin_v2.types.AppProfile): + Required. The app profile which will + (partially) replace the current value. + + This corresponds to the ``app_profile`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The subset of app profile + fields which should be replaced. If + unset, all fields will be replaced. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.AppProfile` A configuration object describing how Cloud Bigtable should treat traffic + from a particular end user application. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([app_profile, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_instance_admin.UpdateAppProfileRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_instance_admin.UpdateAppProfileRequest): + request = bigtable_instance_admin.UpdateAppProfileRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if app_profile is not None: + request.app_profile = app_profile + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_app_profile] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("app_profile.name", request.app_profile.name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + instance.AppProfile, + metadata_type=bigtable_instance_admin.UpdateAppProfileMetadata, + ) + + # Done; return the response. + return response + + def delete_app_profile( + self, + request: bigtable_instance_admin.DeleteAppProfileRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes an app profile from an instance. + + Args: + request (google.cloud.bigtable_admin_v2.types.DeleteAppProfileRequest): + The request object. Request message for + BigtableInstanceAdmin.DeleteAppProfile. + name (str): + Required. The unique name of the app profile to be + deleted. Values are of the form + ``projects/{project}/instances/{instance}/appProfiles/{app_profile}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_instance_admin.DeleteAppProfileRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_instance_admin.DeleteAppProfileRequest): + request = bigtable_instance_admin.DeleteAppProfileRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+
+        if name is not None:
+            request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.delete_app_profile]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+        )
+
+        # Send the request.
+        rpc(
+            request, retry=retry, timeout=timeout, metadata=metadata,
+        )
+
+    def get_iam_policy(
+        self,
+        request: iam_policy.GetIamPolicyRequest = None,
+        *,
+        resource: str = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> policy.Policy:
+        r"""Gets the access control policy for an instance
+        resource. Returns an empty policy if an instance exists
+        but does not have a policy set.
+
+        Args:
+            request (google.iam.v1.iam_policy_pb2.GetIamPolicyRequest):
+                The request object. Request message for `GetIamPolicy`
+                method.
+            resource (str):
+                REQUIRED: The resource for which the
+                policy is being requested. See the
+                operation documentation for the
+                appropriate value for this field.
+
+                This corresponds to the ``resource`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.iam.v1.policy_pb2.Policy:
+                Defines an Identity and Access Management (IAM) policy. It is
+                used to specify access control policies for Cloud Platform
+                resources.
+
+                A Policy is a collection of bindings. A binding binds
+                one or more members to a single role. Members can be
+                user accounts, service accounts, Google groups, and
+                domains (such as G Suite). A role is a named list of
+                permissions (defined by IAM or configured by users).
+                A binding can optionally specify a condition, which
+                is a logic expression that further constrains the
+                role binding based on attributes about the request
+                and/or target resource.
+
+                **JSON Example**::
+
+                    {
+                      "bindings": [
+                        {
+                          "role": "roles/resourcemanager.organizationAdmin",
+                          "members": [
+                            "user:mike@example.com",
+                            "group:admins@example.com",
+                            "domain:google.com",
+                            "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+                          ]
+                        },
+                        {
+                          "role": "roles/resourcemanager.organizationViewer",
+                          "members": ["user:eve@example.com"],
+                          "condition": {
+                            "title": "expirable access",
+                            "description": "Does not grant access after Sep 2020",
+                            "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')"
+                          }
+                        }
+                      ]
+                    }
+
+                **YAML Example**::
+
+                    bindings:
+                    - members:
+                      - user:mike@example.com
+                      - group:admins@example.com
+                      - domain:google.com
+                      - serviceAccount:my-project-id@appspot.gserviceaccount.com
+                      role: roles/resourcemanager.organizationAdmin
+                    - members:
+                      - user:eve@example.com
+                      role: roles/resourcemanager.organizationViewer
+                      condition:
+                        title: expirable access
+                        description: Does not grant access after Sep 2020
+                        expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+
+                For a description of IAM and its features, see the
+                `IAM developer's guide <https://cloud.google.com/iam/docs>`__.
+
+        """
+        # Create or coerce a protobuf request object.
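+        # Unlike the proto-plus admin requests above, google.iam.v1 requests
+        # are plain protobuf messages, so the coercion below constructs them
+        # via keyword expansion instead of the copy-avoiding pattern.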
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([resource]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy.GetIamPolicyRequest(**request) + + elif not request: + request = iam_policy.GetIamPolicyRequest(resource=resource,) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_iam_policy] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def set_iam_policy( + self, + request: iam_policy.SetIamPolicyRequest = None, + *, + resource: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy.Policy: + r"""Sets the access control policy on an instance + resource. Replaces any existing policy. + + Args: + request (google.iam.v1.iam_policy_pb2.SetIamPolicyRequest): + The request object. Request message for `SetIamPolicy` + method. + resource (str): + REQUIRED: The resource for which the + policy is being specified. See the + operation documentation for the + appropriate value for this field. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.iam.v1.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. It is used to + specify access control policies for Cloud Platform + resources. + + A Policy is a collection of bindings. A binding binds + one or more members to a single role. Members can be + user accounts, service accounts, Google groups, and + domains (such as G Suite). A role is a named list of + permissions (defined by IAM or configured by users). + A binding can optionally specify a condition, which + is a logic expression that further constrains the + role binding based on attributes about the request + and/or target resource. 
+
+                **JSON Example**::
+
+                    {
+                      "bindings": [
+                        {
+                          "role": "roles/resourcemanager.organizationAdmin",
+                          "members": [
+                            "user:mike@example.com",
+                            "group:admins@example.com",
+                            "domain:google.com",
+                            "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+                          ]
+                        },
+                        {
+                          "role": "roles/resourcemanager.organizationViewer",
+                          "members": ["user:eve@example.com"],
+                          "condition": {
+                            "title": "expirable access",
+                            "description": "Does not grant access after Sep 2020",
+                            "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')"
+                          }
+                        }
+                      ]
+                    }
+
+                **YAML Example**::
+
+                    bindings:
+                    - members:
+                      - user:mike@example.com
+                      - group:admins@example.com
+                      - domain:google.com
+                      - serviceAccount:my-project-id@appspot.gserviceaccount.com
+                      role: roles/resourcemanager.organizationAdmin
+                    - members:
+                      - user:eve@example.com
+                      role: roles/resourcemanager.organizationViewer
+                      condition:
+                        title: expirable access
+                        description: Does not grant access after Sep 2020
+                        expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+
+                For a description of IAM and its features, see the
+                `IAM developer's guide <https://cloud.google.com/iam/docs>`__.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([resource])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # The request isn't a proto-plus wrapped type,
+        # so it must be constructed via keyword expansion.
+        if isinstance(request, dict):
+            request = iam_policy.SetIamPolicyRequest(**request)
+
+        elif not request:
+            request = iam_policy.SetIamPolicyRequest(resource=resource,)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.set_iam_policy]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
+        )
+
+        # Send the request.
+        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Done; return the response.
+        return response
+
+    def test_iam_permissions(
+        self,
+        request: iam_policy.TestIamPermissionsRequest = None,
+        *,
+        resource: str = None,
+        permissions: Sequence[str] = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> iam_policy.TestIamPermissionsResponse:
+        r"""Returns permissions that the caller has on the
+        specified instance resource.
+
+        Args:
+            request (google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest):
+                The request object. Request message for
+                `TestIamPermissions` method.
+            resource (str):
+                REQUIRED: The resource for which the
+                policy detail is being requested. See
+                the operation documentation for the
+                appropriate value for this field.
+
+                This corresponds to the ``resource`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            permissions (Sequence[str]):
+                The set of permissions to check for the ``resource``.
+                Permissions with wildcards (such as '*' or 'storage.*')
+                are not allowed. For more information see `IAM
+                Overview <https://cloud.google.com/iam/docs/overview#permissions>`__.
+ + This corresponds to the ``permissions`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse: + Response message for TestIamPermissions method. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([resource, permissions]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy.TestIamPermissionsRequest(**request) + + elif not request: + request = iam_policy.TestIamPermissionsRequest( + resource=resource, permissions=permissions, + ) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.test_iam_permissions] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-bigtable-admin", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("BigtableInstanceAdminClient",) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py new file mode 100644 index 000000000..f70936b5b --- /dev/null +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py @@ -0,0 +1,153 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple + +from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin +from google.cloud.bigtable_admin_v2.types import instance + + +class ListAppProfilesPager: + """A pager for iterating through ``list_app_profiles`` requests. + + This class thinly wraps an initial + :class:`google.cloud.bigtable_admin_v2.types.ListAppProfilesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``app_profiles`` field. 
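+
+    A minimal usage sketch (hypothetical names; in practice this pager is
+    returned by ``BigtableInstanceAdminClient.list_app_profiles`` rather
+    than constructed directly)::
+
+        for app_profile in client.list_app_profiles(request=request):
+            print(app_profile.name)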
+ + If there are more pages, the ``__iter__`` method will make additional + ``ListAppProfiles`` requests and continue to iterate + through the ``app_profiles`` field on the + corresponding responses. + + All the usual :class:`google.cloud.bigtable_admin_v2.types.ListAppProfilesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., bigtable_instance_admin.ListAppProfilesResponse], + request: bigtable_instance_admin.ListAppProfilesRequest, + response: bigtable_instance_admin.ListAppProfilesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.bigtable_admin_v2.types.ListAppProfilesRequest): + The initial request object. + response (google.cloud.bigtable_admin_v2.types.ListAppProfilesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = bigtable_instance_admin.ListAppProfilesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[bigtable_instance_admin.ListAppProfilesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[instance.AppProfile]: + for page in self.pages: + yield from page.app_profiles + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListAppProfilesAsyncPager: + """A pager for iterating through ``list_app_profiles`` requests. + + This class thinly wraps an initial + :class:`google.cloud.bigtable_admin_v2.types.ListAppProfilesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``app_profiles`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListAppProfiles`` requests and continue to iterate + through the ``app_profiles`` field on the + corresponding responses. + + All the usual :class:`google.cloud.bigtable_admin_v2.types.ListAppProfilesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[ + ..., Awaitable[bigtable_instance_admin.ListAppProfilesResponse] + ], + request: bigtable_instance_admin.ListAppProfilesRequest, + response: bigtable_instance_admin.ListAppProfilesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.bigtable_admin_v2.types.ListAppProfilesRequest): + The initial request object. + response (google.cloud.bigtable_admin_v2.types.ListAppProfilesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = bigtable_instance_admin.ListAppProfilesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterable[bigtable_instance_admin.ListAppProfilesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[instance.AppProfile]: + async def async_generator(): + async for page in self.pages: + for response in page.app_profiles: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py new file mode 100644 index 000000000..23b510711 --- /dev/null +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +from typing import Dict, Type + +from .base import BigtableInstanceAdminTransport +from .grpc import BigtableInstanceAdminGrpcTransport +from .grpc_asyncio import BigtableInstanceAdminGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = ( + OrderedDict() +) # type: Dict[str, Type[BigtableInstanceAdminTransport]] +_transport_registry["grpc"] = BigtableInstanceAdminGrpcTransport +_transport_registry["grpc_asyncio"] = BigtableInstanceAdminGrpcAsyncIOTransport + +__all__ = ( + "BigtableInstanceAdminTransport", + "BigtableInstanceAdminGrpcTransport", + "BigtableInstanceAdminGrpcAsyncIOTransport", +) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py new file mode 100644 index 000000000..004424c28 --- /dev/null +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py @@ -0,0 +1,491 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+
+import abc
+import typing
+import pkg_resources
+
+from google import auth  # type: ignore
+from google.api_core import exceptions  # type: ignore
+from google.api_core import gapic_v1  # type: ignore
+from google.api_core import retry as retries  # type: ignore
+from google.api_core import operations_v1  # type: ignore
+from google.auth import credentials  # type: ignore
+
+from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin
+from google.cloud.bigtable_admin_v2.types import instance
+from google.iam.v1 import iam_policy_pb2 as iam_policy  # type: ignore
+from google.iam.v1 import policy_pb2 as policy  # type: ignore
+from google.longrunning import operations_pb2 as operations  # type: ignore
+from google.protobuf import empty_pb2 as empty  # type: ignore
+
+
+try:
+    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+        gapic_version=pkg_resources.get_distribution(
+            "google-cloud-bigtable-admin",
+        ).version,
+    )
+except pkg_resources.DistributionNotFound:
+    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
+class BigtableInstanceAdminTransport(abc.ABC):
+    """Abstract transport class for BigtableInstanceAdmin."""
+
+    AUTH_SCOPES = (
+        "https://www.googleapis.com/auth/bigtable.admin",
+        "https://www.googleapis.com/auth/bigtable.admin.cluster",
+        "https://www.googleapis.com/auth/bigtable.admin.instance",
+        "https://www.googleapis.com/auth/cloud-bigtable.admin",
+        "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster",
+        "https://www.googleapis.com/auth/cloud-platform",
+        "https://www.googleapis.com/auth/cloud-platform.read-only",
+    )
+
+    def __init__(
+        self,
+        *,
+        host: str = "bigtableadmin.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: typing.Optional[str] = None,
+        scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES,
+        quota_project_id: typing.Optional[str] = None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+        **kwargs,
+    ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]): The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): A list of scopes.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+        """
+        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
+        if ":" not in host:
+            host += ":443"
+        self._host = host
+
+        # If no credentials are provided, then determine the appropriate
+        # defaults.
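+        # Resolution order: an explicit ``credentials`` object wins, then
+        # ``credentials_file`` (loaded via google.auth), and finally
+        # application default credentials discovered from the environment.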
+ if credentials and credentials_file: + raise exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = auth.load_credentials_from_file( + credentials_file, scopes=scopes, quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = auth.default( + scopes=scopes, quota_project_id=quota_project_id + ) + + # Save the credentials. + self._credentials = credentials + + # Lifted into its own function so it can be stubbed out during tests. + self._prep_wrapped_messages(client_info) + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.create_instance: gapic_v1.method.wrap_method( + self.create_instance, default_timeout=300.0, client_info=client_info, + ), + self.get_instance: gapic_v1.method.wrap_method( + self.get_instance, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_instances: gapic_v1.method.wrap_method( + self.list_instances, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.update_instance: gapic_v1.method.wrap_method( + self.update_instance, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.partial_update_instance: gapic_v1.method.wrap_method( + self.partial_update_instance, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.delete_instance: gapic_v1.method.wrap_method( + self.delete_instance, default_timeout=60.0, client_info=client_info, + ), + self.create_cluster: gapic_v1.method.wrap_method( + self.create_cluster, default_timeout=60.0, client_info=client_info, + ), + self.get_cluster: gapic_v1.method.wrap_method( + self.get_cluster, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_clusters: gapic_v1.method.wrap_method( + self.list_clusters, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.update_cluster: gapic_v1.method.wrap_method( + self.update_cluster, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.delete_cluster: gapic_v1.method.wrap_method( + self.delete_cluster, default_timeout=60.0, client_info=client_info, + ), + self.create_app_profile: gapic_v1.method.wrap_method( + 
self.create_app_profile, default_timeout=60.0, client_info=client_info, + ), + self.get_app_profile: gapic_v1.method.wrap_method( + self.get_app_profile, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_app_profiles: gapic_v1.method.wrap_method( + self.list_app_profiles, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.update_app_profile: gapic_v1.method.wrap_method( + self.update_app_profile, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.delete_app_profile: gapic_v1.method.wrap_method( + self.delete_app_profile, default_timeout=60.0, client_info=client_info, + ), + self.get_iam_policy: gapic_v1.method.wrap_method( + self.get_iam_policy, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.set_iam_policy: gapic_v1.method.wrap_method( + self.set_iam_policy, default_timeout=60.0, client_info=client_info, + ), + self.test_iam_permissions: gapic_v1.method.wrap_method( + self.test_iam_permissions, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + } + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_instance( + self, + ) -> typing.Callable[ + [bigtable_instance_admin.CreateInstanceRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def get_instance( + self, + ) -> typing.Callable[ + [bigtable_instance_admin.GetInstanceRequest], + typing.Union[instance.Instance, typing.Awaitable[instance.Instance]], + ]: + raise NotImplementedError() + + @property + def list_instances( + self, + ) -> typing.Callable[ + [bigtable_instance_admin.ListInstancesRequest], + typing.Union[ + bigtable_instance_admin.ListInstancesResponse, + typing.Awaitable[bigtable_instance_admin.ListInstancesResponse], + ], + ]: + raise NotImplementedError() + + @property + def update_instance( + self, + ) -> typing.Callable[ + [instance.Instance], + typing.Union[instance.Instance, typing.Awaitable[instance.Instance]], + ]: + raise NotImplementedError() + + @property + def partial_update_instance( + self, + ) -> typing.Callable[ + [bigtable_instance_admin.PartialUpdateInstanceRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def delete_instance( + self, + ) -> typing.Callable[ + [bigtable_instance_admin.DeleteInstanceRequest], + typing.Union[empty.Empty, typing.Awaitable[empty.Empty]], + ]: + raise 
NotImplementedError() + + @property + def create_cluster( + self, + ) -> typing.Callable[ + [bigtable_instance_admin.CreateClusterRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def get_cluster( + self, + ) -> typing.Callable[ + [bigtable_instance_admin.GetClusterRequest], + typing.Union[instance.Cluster, typing.Awaitable[instance.Cluster]], + ]: + raise NotImplementedError() + + @property + def list_clusters( + self, + ) -> typing.Callable[ + [bigtable_instance_admin.ListClustersRequest], + typing.Union[ + bigtable_instance_admin.ListClustersResponse, + typing.Awaitable[bigtable_instance_admin.ListClustersResponse], + ], + ]: + raise NotImplementedError() + + @property + def update_cluster( + self, + ) -> typing.Callable[ + [instance.Cluster], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def delete_cluster( + self, + ) -> typing.Callable[ + [bigtable_instance_admin.DeleteClusterRequest], + typing.Union[empty.Empty, typing.Awaitable[empty.Empty]], + ]: + raise NotImplementedError() + + @property + def create_app_profile( + self, + ) -> typing.Callable[ + [bigtable_instance_admin.CreateAppProfileRequest], + typing.Union[instance.AppProfile, typing.Awaitable[instance.AppProfile]], + ]: + raise NotImplementedError() + + @property + def get_app_profile( + self, + ) -> typing.Callable[ + [bigtable_instance_admin.GetAppProfileRequest], + typing.Union[instance.AppProfile, typing.Awaitable[instance.AppProfile]], + ]: + raise NotImplementedError() + + @property + def list_app_profiles( + self, + ) -> typing.Callable[ + [bigtable_instance_admin.ListAppProfilesRequest], + typing.Union[ + bigtable_instance_admin.ListAppProfilesResponse, + typing.Awaitable[bigtable_instance_admin.ListAppProfilesResponse], + ], + ]: + raise NotImplementedError() + + @property + def update_app_profile( + self, + ) -> typing.Callable[ + [bigtable_instance_admin.UpdateAppProfileRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def delete_app_profile( + self, + ) -> typing.Callable[ + [bigtable_instance_admin.DeleteAppProfileRequest], + typing.Union[empty.Empty, typing.Awaitable[empty.Empty]], + ]: + raise NotImplementedError() + + @property + def get_iam_policy( + self, + ) -> typing.Callable[ + [iam_policy.GetIamPolicyRequest], + typing.Union[policy.Policy, typing.Awaitable[policy.Policy]], + ]: + raise NotImplementedError() + + @property + def set_iam_policy( + self, + ) -> typing.Callable[ + [iam_policy.SetIamPolicyRequest], + typing.Union[policy.Policy, typing.Awaitable[policy.Policy]], + ]: + raise NotImplementedError() + + @property + def test_iam_permissions( + self, + ) -> typing.Callable[ + [iam_policy.TestIamPermissionsRequest], + typing.Union[ + iam_policy.TestIamPermissionsResponse, + typing.Awaitable[iam_policy.TestIamPermissionsResponse], + ], + ]: + raise NotImplementedError() + + +__all__ = ("BigtableInstanceAdminTransport",) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py new file mode 100644 index 000000000..0cbca1c67 --- /dev/null +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py @@ -0,0 +1,794 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the 
Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import warnings
+from typing import Callable, Dict, Optional, Sequence, Tuple
+
+from google.api_core import grpc_helpers  # type: ignore
+from google.api_core import operations_v1  # type: ignore
+from google.api_core import gapic_v1  # type: ignore
+from google import auth  # type: ignore
+from google.auth import credentials  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+
+import grpc  # type: ignore
+
+from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin
+from google.cloud.bigtable_admin_v2.types import instance
+from google.iam.v1 import iam_policy_pb2 as iam_policy  # type: ignore
+from google.iam.v1 import policy_pb2 as policy  # type: ignore
+from google.longrunning import operations_pb2 as operations  # type: ignore
+from google.protobuf import empty_pb2 as empty  # type: ignore
+
+from .base import BigtableInstanceAdminTransport, DEFAULT_CLIENT_INFO
+
+
+class BigtableInstanceAdminGrpcTransport(BigtableInstanceAdminTransport):
+    """gRPC backend transport for BigtableInstanceAdmin.
+
+    Service for creating, configuring, and deleting Cloud
+    Bigtable Instances and Clusters. Provides access to the Instance
+    and Cluster schemas only, not the tables' metadata or data
+    stored in those tables.
+
+    This class defines the same methods as the primary client, so the
+    primary client can load the underlying transport implementation
+    and call it.
+
+    It sends protocol buffers over the wire using gRPC (which is built on
+    top of HTTP/2); the ``grpcio`` package must be installed.
+    """
+
+    _stubs: Dict[str, Callable]
+
+    def __init__(
+        self,
+        *,
+        host: str = "bigtableadmin.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: str = None,
+        scopes: Sequence[str] = None,
+        channel: grpc.Channel = None,
+        api_mtls_endpoint: str = None,
+        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+        ssl_channel_credentials: grpc.ChannelCredentials = None,
+        client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+        quota_project_id: Optional[str] = None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+    ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]): The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+                ignored if ``channel`` is provided.
+            channel (Optional[grpc.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for the gRPC channel. It is ignored if ``channel`` is provided.
+            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                A callback to provide client certificate bytes and private key bytes,
+                both in PEM format. It is used to configure a mutual TLS channel. It is
+                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+
+        Raises:
+          google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+              creation failed for any reason.
+          google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+              and ``credentials_file`` are passed.
+        """
+        self._ssl_channel_credentials = ssl_channel_credentials
+
+        if api_mtls_endpoint:
+            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+        if client_cert_source:
+            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+        if channel:
+            # Sanity check: Ensure that channel and credentials are not both
+            # provided.
+            credentials = False
+
+            # If a channel was explicitly provided, set it.
+            self._grpc_channel = channel
+            self._ssl_channel_credentials = None
+        elif api_mtls_endpoint:
+            host = (
+                api_mtls_endpoint
+                if ":" in api_mtls_endpoint
+                else api_mtls_endpoint + ":443"
+            )
+
+            if credentials is None:
+                credentials, _ = auth.default(
+                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+                )
+
+            # Create SSL credentials with client_cert_source or application
+            # default SSL credentials.
+            if client_cert_source:
+                cert, key = client_cert_source()
+                ssl_credentials = grpc.ssl_channel_credentials(
+                    certificate_chain=cert, private_key=key
+                )
+            else:
+                ssl_credentials = SslCredentials().ssl_credentials
+
+            # create a new channel. The provided one is ignored.
+            self._grpc_channel = type(self).create_channel(
+                host,
+                credentials=credentials,
+                credentials_file=credentials_file,
+                ssl_credentials=ssl_credentials,
+                scopes=scopes or self.AUTH_SCOPES,
+                quota_project_id=quota_project_id,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+            self._ssl_channel_credentials = ssl_credentials
+        else:
+            host = host if ":" in host else host + ":443"
+
+            if credentials is None:
+                credentials, _ = auth.default(
+                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+                )
+
+            if client_cert_source_for_mtls and not ssl_channel_credentials:
+                cert, key = client_cert_source_for_mtls()
+                self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                    certificate_chain=cert, private_key=key
+                )
+
+            # create a new channel. The provided one is ignored.
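+            # The -1 option values below lift gRPC's default message-size
+            # caps (received messages are otherwise limited to 4 MB).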
+            self._grpc_channel = type(self).create_channel(
+                host,
+                credentials=credentials,
+                credentials_file=credentials_file,
+                ssl_credentials=self._ssl_channel_credentials,
+                scopes=scopes or self.AUTH_SCOPES,
+                quota_project_id=quota_project_id,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+
+        self._stubs = {}  # type: Dict[str, Callable]
+        self._operations_client = None
+
+        # Run the base constructor.
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes or self.AUTH_SCOPES,
+            quota_project_id=quota_project_id,
+            client_info=client_info,
+        )
+
+    @classmethod
+    def create_channel(
+        cls,
+        host: str = "bigtableadmin.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: str = None,
+        scopes: Optional[Sequence[str]] = None,
+        quota_project_id: Optional[str] = None,
+        **kwargs,
+    ) -> grpc.Channel:
+        """Create and return a gRPC channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            grpc.Channel: A gRPC channel object.
+
+        Raises:
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+        scopes = scopes or cls.AUTH_SCOPES
+        return grpc_helpers.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes,
+            quota_project_id=quota_project_id,
+            **kwargs,
+        )
+
+    @property
+    def grpc_channel(self) -> grpc.Channel:
+        """Return the channel designed to connect to this service.
+        """
+        return self._grpc_channel
+
+    @property
+    def operations_client(self) -> operations_v1.OperationsClient:
+        """Create the client designed to process long-running operations.
+
+        This property caches on the instance; repeated calls return the same
+        client.
+        """
+        # Sanity check: Only create a new client if we do not already have one.
+        if self._operations_client is None:
+            self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
+
+        # Return the client from cache.
+        return self._operations_client
+
+    @property
+    def create_instance(
+        self,
+    ) -> Callable[
+        [bigtable_instance_admin.CreateInstanceRequest], operations.Operation
+    ]:
+        r"""Return a callable for the create instance method over gRPC.
+
+        Create an instance within a project.
+
+        Returns:
+            Callable[[~.CreateInstanceRequest],
+                    ~.Operation]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
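+        # The stub is created on first property access and cached in
+        # self._stubs, so subsequent lookups reuse the same callable.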
+ if "create_instance" not in self._stubs: + self._stubs["create_instance"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateInstance", + request_serializer=bigtable_instance_admin.CreateInstanceRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["create_instance"] + + @property + def get_instance( + self, + ) -> Callable[[bigtable_instance_admin.GetInstanceRequest], instance.Instance]: + r"""Return a callable for the get instance method over gRPC. + + Gets information about an instance. + + Returns: + Callable[[~.GetInstanceRequest], + ~.Instance]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_instance" not in self._stubs: + self._stubs["get_instance"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetInstance", + request_serializer=bigtable_instance_admin.GetInstanceRequest.serialize, + response_deserializer=instance.Instance.deserialize, + ) + return self._stubs["get_instance"] + + @property + def list_instances( + self, + ) -> Callable[ + [bigtable_instance_admin.ListInstancesRequest], + bigtable_instance_admin.ListInstancesResponse, + ]: + r"""Return a callable for the list instances method over gRPC. + + Lists information about instances in a project. + + Returns: + Callable[[~.ListInstancesRequest], + ~.ListInstancesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_instances" not in self._stubs: + self._stubs["list_instances"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListInstances", + request_serializer=bigtable_instance_admin.ListInstancesRequest.serialize, + response_deserializer=bigtable_instance_admin.ListInstancesResponse.deserialize, + ) + return self._stubs["list_instances"] + + @property + def update_instance(self) -> Callable[[instance.Instance], instance.Instance]: + r"""Return a callable for the update instance method over gRPC. + + Updates an instance within a project. This method + updates only the display name and type for an Instance. + To update other Instance properties, such as labels, use + PartialUpdateInstance. + + Returns: + Callable[[~.Instance], + ~.Instance]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_instance" not in self._stubs: + self._stubs["update_instance"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateInstance", + request_serializer=instance.Instance.serialize, + response_deserializer=instance.Instance.deserialize, + ) + return self._stubs["update_instance"] + + @property + def partial_update_instance( + self, + ) -> Callable[ + [bigtable_instance_admin.PartialUpdateInstanceRequest], operations.Operation + ]: + r"""Return a callable for the partial update instance method over gRPC. 
+ + Partially updates an instance within a project. This + method can modify all fields of an Instance and is the + preferred way to update an Instance. + + Returns: + Callable[[~.PartialUpdateInstanceRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "partial_update_instance" not in self._stubs: + self._stubs["partial_update_instance"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateInstance", + request_serializer=bigtable_instance_admin.PartialUpdateInstanceRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["partial_update_instance"] + + @property + def delete_instance( + self, + ) -> Callable[[bigtable_instance_admin.DeleteInstanceRequest], empty.Empty]: + r"""Return a callable for the delete instance method over gRPC. + + Delete an instance from a project. + + Returns: + Callable[[~.DeleteInstanceRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_instance" not in self._stubs: + self._stubs["delete_instance"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteInstance", + request_serializer=bigtable_instance_admin.DeleteInstanceRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_instance"] + + @property + def create_cluster( + self, + ) -> Callable[[bigtable_instance_admin.CreateClusterRequest], operations.Operation]: + r"""Return a callable for the create cluster method over gRPC. + + Creates a cluster within an instance. + + Returns: + Callable[[~.CreateClusterRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_cluster" not in self._stubs: + self._stubs["create_cluster"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateCluster", + request_serializer=bigtable_instance_admin.CreateClusterRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["create_cluster"] + + @property + def get_cluster( + self, + ) -> Callable[[bigtable_instance_admin.GetClusterRequest], instance.Cluster]: + r"""Return a callable for the get cluster method over gRPC. + + Gets information about a cluster. + + Returns: + Callable[[~.GetClusterRequest], + ~.Cluster]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
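+        # Note: retry and timeout policies are applied one layer up, by the
+        # wrapped methods in transports/base.py; the raw stub returned here
+        # performs a single RPC attempt.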
+ if "get_cluster" not in self._stubs: + self._stubs["get_cluster"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetCluster", + request_serializer=bigtable_instance_admin.GetClusterRequest.serialize, + response_deserializer=instance.Cluster.deserialize, + ) + return self._stubs["get_cluster"] + + @property + def list_clusters( + self, + ) -> Callable[ + [bigtable_instance_admin.ListClustersRequest], + bigtable_instance_admin.ListClustersResponse, + ]: + r"""Return a callable for the list clusters method over gRPC. + + Lists information about clusters in an instance. + + Returns: + Callable[[~.ListClustersRequest], + ~.ListClustersResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_clusters" not in self._stubs: + self._stubs["list_clusters"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListClusters", + request_serializer=bigtable_instance_admin.ListClustersRequest.serialize, + response_deserializer=bigtable_instance_admin.ListClustersResponse.deserialize, + ) + return self._stubs["list_clusters"] + + @property + def update_cluster(self) -> Callable[[instance.Cluster], operations.Operation]: + r"""Return a callable for the update cluster method over gRPC. + + Updates a cluster within an instance. + + Returns: + Callable[[~.Cluster], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_cluster" not in self._stubs: + self._stubs["update_cluster"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateCluster", + request_serializer=instance.Cluster.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["update_cluster"] + + @property + def delete_cluster( + self, + ) -> Callable[[bigtable_instance_admin.DeleteClusterRequest], empty.Empty]: + r"""Return a callable for the delete cluster method over gRPC. + + Deletes a cluster from an instance. + + Returns: + Callable[[~.DeleteClusterRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_cluster" not in self._stubs: + self._stubs["delete_cluster"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteCluster", + request_serializer=bigtable_instance_admin.DeleteClusterRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_cluster"] + + @property + def create_app_profile( + self, + ) -> Callable[ + [bigtable_instance_admin.CreateAppProfileRequest], instance.AppProfile + ]: + r"""Return a callable for the create app profile method over gRPC. + + Creates an app profile within an instance. + + Returns: + Callable[[~.CreateAppProfileRequest], + ~.AppProfile]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_app_profile" not in self._stubs: + self._stubs["create_app_profile"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateAppProfile", + request_serializer=bigtable_instance_admin.CreateAppProfileRequest.serialize, + response_deserializer=instance.AppProfile.deserialize, + ) + return self._stubs["create_app_profile"] + + @property + def get_app_profile( + self, + ) -> Callable[[bigtable_instance_admin.GetAppProfileRequest], instance.AppProfile]: + r"""Return a callable for the get app profile method over gRPC. + + Gets information about an app profile. + + Returns: + Callable[[~.GetAppProfileRequest], + ~.AppProfile]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_app_profile" not in self._stubs: + self._stubs["get_app_profile"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetAppProfile", + request_serializer=bigtable_instance_admin.GetAppProfileRequest.serialize, + response_deserializer=instance.AppProfile.deserialize, + ) + return self._stubs["get_app_profile"] + + @property + def list_app_profiles( + self, + ) -> Callable[ + [bigtable_instance_admin.ListAppProfilesRequest], + bigtable_instance_admin.ListAppProfilesResponse, + ]: + r"""Return a callable for the list app profiles method over gRPC. + + Lists information about app profiles in an instance. + + Returns: + Callable[[~.ListAppProfilesRequest], + ~.ListAppProfilesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_app_profiles" not in self._stubs: + self._stubs["list_app_profiles"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListAppProfiles", + request_serializer=bigtable_instance_admin.ListAppProfilesRequest.serialize, + response_deserializer=bigtable_instance_admin.ListAppProfilesResponse.deserialize, + ) + return self._stubs["list_app_profiles"] + + @property + def update_app_profile( + self, + ) -> Callable[ + [bigtable_instance_admin.UpdateAppProfileRequest], operations.Operation + ]: + r"""Return a callable for the update app profile method over gRPC. + + Updates an app profile within an instance. + + Returns: + Callable[[~.UpdateAppProfileRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "update_app_profile" not in self._stubs: + self._stubs["update_app_profile"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateAppProfile", + request_serializer=bigtable_instance_admin.UpdateAppProfileRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["update_app_profile"] + + @property + def delete_app_profile( + self, + ) -> Callable[[bigtable_instance_admin.DeleteAppProfileRequest], empty.Empty]: + r"""Return a callable for the delete app profile method over gRPC. + + Deletes an app profile from an instance. + + Returns: + Callable[[~.DeleteAppProfileRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_app_profile" not in self._stubs: + self._stubs["delete_app_profile"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteAppProfile", + request_serializer=bigtable_instance_admin.DeleteAppProfileRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_app_profile"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy.GetIamPolicyRequest], policy.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + + Gets the access control policy for an instance + resource. Returns an empty policy if an instance exists + but does not have a policy set. + + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetIamPolicy", + request_serializer=iam_policy.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy.SetIamPolicyRequest], policy.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + + Sets the access control policy on an instance + resource. Replaces any existing policy. + + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/SetIamPolicy", + request_serializer=iam_policy.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy.TestIamPermissionsRequest], iam_policy.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + + Returns permissions that the caller has on the + specified instance resource. 
+ + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/TestIamPermissions", + request_serializer=iam_policy.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ("BigtableInstanceAdminGrpcTransport",) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py new file mode 100644 index 000000000..e5fbf6a4c --- /dev/null +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py @@ -0,0 +1,822 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers_async # type: ignore +from google.api_core import operations_v1 # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin +from google.cloud.bigtable_admin_v2.types import instance +from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore +from google.iam.v1 import policy_pb2 as policy # type: ignore +from google.longrunning import operations_pb2 as operations # type: ignore +from google.protobuf import empty_pb2 as empty # type: ignore + +from .base import BigtableInstanceAdminTransport, DEFAULT_CLIENT_INFO +from .grpc import BigtableInstanceAdminGrpcTransport + + +class BigtableInstanceAdminGrpcAsyncIOTransport(BigtableInstanceAdminTransport): + """gRPC AsyncIO backend transport for BigtableInstanceAdmin. + + Service for creating, configuring, and deleting Cloud + Bigtable Instances and Clusters. Provides access to the Instance + and Cluster schemas only, not the tables' metadata or data + stored in those tables. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
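+
+    A minimal usage sketch (hypothetical, for illustration only; it assumes
+    application default credentials are available in the environment)::
+
+        transport = BigtableInstanceAdminGrpcAsyncIOTransport()
+        rpc = transport.list_instances  # a callable returning an awaitable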
+    """
+
+    _grpc_channel: aio.Channel
+    _stubs: Dict[str, Callable] = {}
+
+    @classmethod
+    def create_channel(
+        cls,
+        host: str = "bigtableadmin.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: Optional[str] = None,
+        scopes: Optional[Sequence[str]] = None,
+        quota_project_id: Optional[str] = None,
+        **kwargs,
+    ) -> aio.Channel:
+        """Create and return a gRPC AsyncIO channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            aio.Channel: A gRPC AsyncIO channel object.
+        """
+        scopes = scopes or cls.AUTH_SCOPES
+        return grpc_helpers_async.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes,
+            quota_project_id=quota_project_id,
+            **kwargs,
+        )
+
+    def __init__(
+        self,
+        *,
+        host: str = "bigtableadmin.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: Optional[str] = None,
+        scopes: Optional[Sequence[str]] = None,
+        channel: aio.Channel = None,
+        api_mtls_endpoint: str = None,
+        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+        ssl_channel_credentials: grpc.ChannelCredentials = None,
+        client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+        quota_project_id=None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+    ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]): The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            channel (Optional[aio.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format.
It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for the gRPC channel. It is ignored if ``channel`` is provided.
+            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                A callback to provide client certificate bytes and private key bytes,
+                both in PEM format. It is used to configure a mutual TLS channel. It is
+                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+              creation failed for any reason.
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+              and ``credentials_file`` are passed.
+        """
+        self._ssl_channel_credentials = ssl_channel_credentials
+
+        if api_mtls_endpoint:
+            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+        if client_cert_source:
+            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+        if channel:
+            # Sanity check: Ensure that channel and credentials are not both
+            # provided.
+            credentials = False
+
+            # If a channel was explicitly provided, set it.
+            self._grpc_channel = channel
+            self._ssl_channel_credentials = None
+        elif api_mtls_endpoint:
+            host = (
+                api_mtls_endpoint
+                if ":" in api_mtls_endpoint
+                else api_mtls_endpoint + ":443"
+            )
+
+            if credentials is None:
+                credentials, _ = auth.default(
+                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+                )
+
+            # Create SSL credentials with client_cert_source or application
+            # default SSL credentials.
+            if client_cert_source:
+                cert, key = client_cert_source()
+                ssl_credentials = grpc.ssl_channel_credentials(
+                    certificate_chain=cert, private_key=key
+                )
+            else:
+                ssl_credentials = SslCredentials().ssl_credentials
+
+            # create a new channel. The provided one is ignored.
+            self._grpc_channel = type(self).create_channel(
+                host,
+                credentials=credentials,
+                credentials_file=credentials_file,
+                ssl_credentials=ssl_credentials,
+                scopes=scopes or self.AUTH_SCOPES,
+                quota_project_id=quota_project_id,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+            self._ssl_channel_credentials = ssl_credentials
+        else:
+            host = host if ":" in host else host + ":443"
+
+            if credentials is None:
+                credentials, _ = auth.default(
+                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+                )
+
+            if client_cert_source_for_mtls and not ssl_channel_credentials:
+                cert, key = client_cert_source_for_mtls()
+                self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                    certificate_chain=cert, private_key=key
+                )
+
+            # create a new channel. The provided one is ignored.
+            self._grpc_channel = type(self).create_channel(
+                host,
+                credentials=credentials,
+                credentials_file=credentials_file,
+                ssl_credentials=self._ssl_channel_credentials,
+                scopes=scopes or self.AUTH_SCOPES,
+                quota_project_id=quota_project_id,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+
+        # Run the base constructor.
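+        # This saves the host and credentials and precomputes the
+        # retry-wrapped method map (see transports/base.py).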
+ super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + client_info=client_info, + ) + + self._stubs = {} + self._operations_client = None + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_instance( + self, + ) -> Callable[ + [bigtable_instance_admin.CreateInstanceRequest], Awaitable[operations.Operation] + ]: + r"""Return a callable for the create instance method over gRPC. + + Create an instance within a project. + + Returns: + Callable[[~.CreateInstanceRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_instance" not in self._stubs: + self._stubs["create_instance"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateInstance", + request_serializer=bigtable_instance_admin.CreateInstanceRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["create_instance"] + + @property + def get_instance( + self, + ) -> Callable[ + [bigtable_instance_admin.GetInstanceRequest], Awaitable[instance.Instance] + ]: + r"""Return a callable for the get instance method over gRPC. + + Gets information about an instance. + + Returns: + Callable[[~.GetInstanceRequest], + Awaitable[~.Instance]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_instance" not in self._stubs: + self._stubs["get_instance"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetInstance", + request_serializer=bigtable_instance_admin.GetInstanceRequest.serialize, + response_deserializer=instance.Instance.deserialize, + ) + return self._stubs["get_instance"] + + @property + def list_instances( + self, + ) -> Callable[ + [bigtable_instance_admin.ListInstancesRequest], + Awaitable[bigtable_instance_admin.ListInstancesResponse], + ]: + r"""Return a callable for the list instances method over gRPC. + + Lists information about instances in a project. + + Returns: + Callable[[~.ListInstancesRequest], + Awaitable[~.ListInstancesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
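+        # On this asyncio transport, the returned callable produces an
+        # awaitable rather than a blocking response.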
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_instances" not in self._stubs: + self._stubs["list_instances"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListInstances", + request_serializer=bigtable_instance_admin.ListInstancesRequest.serialize, + response_deserializer=bigtable_instance_admin.ListInstancesResponse.deserialize, + ) + return self._stubs["list_instances"] + + @property + def update_instance( + self, + ) -> Callable[[instance.Instance], Awaitable[instance.Instance]]: + r"""Return a callable for the update instance method over gRPC. + + Updates an instance within a project. This method + updates only the display name and type for an Instance. + To update other Instance properties, such as labels, use + PartialUpdateInstance. + + Returns: + Callable[[~.Instance], + Awaitable[~.Instance]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_instance" not in self._stubs: + self._stubs["update_instance"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateInstance", + request_serializer=instance.Instance.serialize, + response_deserializer=instance.Instance.deserialize, + ) + return self._stubs["update_instance"] + + @property + def partial_update_instance( + self, + ) -> Callable[ + [bigtable_instance_admin.PartialUpdateInstanceRequest], + Awaitable[operations.Operation], + ]: + r"""Return a callable for the partial update instance method over gRPC. + + Partially updates an instance within a project. This + method can modify all fields of an Instance and is the + preferred way to update an Instance. + + Returns: + Callable[[~.PartialUpdateInstanceRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "partial_update_instance" not in self._stubs: + self._stubs["partial_update_instance"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateInstance", + request_serializer=bigtable_instance_admin.PartialUpdateInstanceRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["partial_update_instance"] + + @property + def delete_instance( + self, + ) -> Callable[ + [bigtable_instance_admin.DeleteInstanceRequest], Awaitable[empty.Empty] + ]: + r"""Return a callable for the delete instance method over gRPC. + + Delete an instance from a project. + + Returns: + Callable[[~.DeleteInstanceRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_instance" not in self._stubs: + self._stubs["delete_instance"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteInstance", + request_serializer=bigtable_instance_admin.DeleteInstanceRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_instance"] + + @property + def create_cluster( + self, + ) -> Callable[ + [bigtable_instance_admin.CreateClusterRequest], Awaitable[operations.Operation] + ]: + r"""Return a callable for the create cluster method over gRPC. + + Creates a cluster within an instance. + + Returns: + Callable[[~.CreateClusterRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_cluster" not in self._stubs: + self._stubs["create_cluster"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateCluster", + request_serializer=bigtable_instance_admin.CreateClusterRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["create_cluster"] + + @property + def get_cluster( + self, + ) -> Callable[ + [bigtable_instance_admin.GetClusterRequest], Awaitable[instance.Cluster] + ]: + r"""Return a callable for the get cluster method over gRPC. + + Gets information about a cluster. + + Returns: + Callable[[~.GetClusterRequest], + Awaitable[~.Cluster]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_cluster" not in self._stubs: + self._stubs["get_cluster"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetCluster", + request_serializer=bigtable_instance_admin.GetClusterRequest.serialize, + response_deserializer=instance.Cluster.deserialize, + ) + return self._stubs["get_cluster"] + + @property + def list_clusters( + self, + ) -> Callable[ + [bigtable_instance_admin.ListClustersRequest], + Awaitable[bigtable_instance_admin.ListClustersResponse], + ]: + r"""Return a callable for the list clusters method over gRPC. + + Lists information about clusters in an instance. + + Returns: + Callable[[~.ListClustersRequest], + Awaitable[~.ListClustersResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_clusters" not in self._stubs: + self._stubs["list_clusters"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListClusters", + request_serializer=bigtable_instance_admin.ListClustersRequest.serialize, + response_deserializer=bigtable_instance_admin.ListClustersResponse.deserialize, + ) + return self._stubs["list_clusters"] + + @property + def update_cluster( + self, + ) -> Callable[[instance.Cluster], Awaitable[operations.Operation]]: + r"""Return a callable for the update cluster method over gRPC. + + Updates a cluster within an instance. 
+ + Returns: + Callable[[~.Cluster], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_cluster" not in self._stubs: + self._stubs["update_cluster"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateCluster", + request_serializer=instance.Cluster.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["update_cluster"] + + @property + def delete_cluster( + self, + ) -> Callable[ + [bigtable_instance_admin.DeleteClusterRequest], Awaitable[empty.Empty] + ]: + r"""Return a callable for the delete cluster method over gRPC. + + Deletes a cluster from an instance. + + Returns: + Callable[[~.DeleteClusterRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_cluster" not in self._stubs: + self._stubs["delete_cluster"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteCluster", + request_serializer=bigtable_instance_admin.DeleteClusterRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_cluster"] + + @property + def create_app_profile( + self, + ) -> Callable[ + [bigtable_instance_admin.CreateAppProfileRequest], + Awaitable[instance.AppProfile], + ]: + r"""Return a callable for the create app profile method over gRPC. + + Creates an app profile within an instance. + + Returns: + Callable[[~.CreateAppProfileRequest], + Awaitable[~.AppProfile]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_app_profile" not in self._stubs: + self._stubs["create_app_profile"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateAppProfile", + request_serializer=bigtable_instance_admin.CreateAppProfileRequest.serialize, + response_deserializer=instance.AppProfile.deserialize, + ) + return self._stubs["create_app_profile"] + + @property + def get_app_profile( + self, + ) -> Callable[ + [bigtable_instance_admin.GetAppProfileRequest], Awaitable[instance.AppProfile] + ]: + r"""Return a callable for the get app profile method over gRPC. + + Gets information about an app profile. + + Returns: + Callable[[~.GetAppProfileRequest], + Awaitable[~.AppProfile]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
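+ # Note: proto-plus message types (the request messages, Instance,
+ # Cluster, AppProfile, ...) expose `serialize`/`deserialize` hooks,
+ # while plain protobuf types such as Operation and Empty are decoded
+ # with their `FromString` classmethod; hence the two spellings used
+ # across these stubs.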
+ if "get_app_profile" not in self._stubs: + self._stubs["get_app_profile"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetAppProfile", + request_serializer=bigtable_instance_admin.GetAppProfileRequest.serialize, + response_deserializer=instance.AppProfile.deserialize, + ) + return self._stubs["get_app_profile"] + + @property + def list_app_profiles( + self, + ) -> Callable[ + [bigtable_instance_admin.ListAppProfilesRequest], + Awaitable[bigtable_instance_admin.ListAppProfilesResponse], + ]: + r"""Return a callable for the list app profiles method over gRPC. + + Lists information about app profiles in an instance. + + Returns: + Callable[[~.ListAppProfilesRequest], + Awaitable[~.ListAppProfilesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_app_profiles" not in self._stubs: + self._stubs["list_app_profiles"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListAppProfiles", + request_serializer=bigtable_instance_admin.ListAppProfilesRequest.serialize, + response_deserializer=bigtable_instance_admin.ListAppProfilesResponse.deserialize, + ) + return self._stubs["list_app_profiles"] + + @property + def update_app_profile( + self, + ) -> Callable[ + [bigtable_instance_admin.UpdateAppProfileRequest], + Awaitable[operations.Operation], + ]: + r"""Return a callable for the update app profile method over gRPC. + + Updates an app profile within an instance. + + Returns: + Callable[[~.UpdateAppProfileRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_app_profile" not in self._stubs: + self._stubs["update_app_profile"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateAppProfile", + request_serializer=bigtable_instance_admin.UpdateAppProfileRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["update_app_profile"] + + @property + def delete_app_profile( + self, + ) -> Callable[ + [bigtable_instance_admin.DeleteAppProfileRequest], Awaitable[empty.Empty] + ]: + r"""Return a callable for the delete app profile method over gRPC. + + Deletes an app profile from an instance. + + Returns: + Callable[[~.DeleteAppProfileRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_app_profile" not in self._stubs: + self._stubs["delete_app_profile"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteAppProfile", + request_serializer=bigtable_instance_admin.DeleteAppProfileRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_app_profile"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy.GetIamPolicyRequest], Awaitable[policy.Policy]]: + r"""Return a callable for the get iam policy method over gRPC. + + Gets the access control policy for an instance + resource. Returns an empty policy if an instance exists + but does not have a policy set. + + Returns: + Callable[[~.GetIamPolicyRequest], + Awaitable[~.Policy]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetIamPolicy", + request_serializer=iam_policy.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy.SetIamPolicyRequest], Awaitable[policy.Policy]]: + r"""Return a callable for the set iam policy method over gRPC. + + Sets the access control policy on an instance + resource. Replaces any existing policy. + + Returns: + Callable[[~.SetIamPolicyRequest], + Awaitable[~.Policy]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/SetIamPolicy", + request_serializer=iam_policy.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy.TestIamPermissionsRequest], + Awaitable[iam_policy.TestIamPermissionsResponse], + ]: + r"""Return a callable for the test iam permissions method over gRPC. + + Returns permissions that the caller has on the + specified instance resource. + + Returns: + Callable[[~.TestIamPermissionsRequest], + Awaitable[~.TestIamPermissionsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/TestIamPermissions", + request_serializer=iam_policy.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ("BigtableInstanceAdminGrpcAsyncIOTransport",) diff --git a/google/__init__.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py similarity index 69% rename from google/__init__.py rename to google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py index abc370893..76c35f3bb 100644 --- a/google/__init__.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py @@ -1,4 +1,6 @@ -# Copyright 2015 Google LLC +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,15 +13,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +# -"""Google Cloud Bigtable API package.""" - - -try: - import pkg_resources - - pkg_resources.declare_namespace(__name__) -except ImportError: - import pkgutil +from .client import BigtableTableAdminClient +from .async_client import BigtableTableAdminAsyncClient - __path__ = pkgutil.extend_path(__path__, __name__) +__all__ = ( + "BigtableTableAdminClient", + "BigtableTableAdminAsyncClient", +) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py new file mode 100644 index 000000000..19e9ee827 --- /dev/null +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -0,0 +1,2284 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import pagers +from google.cloud.bigtable_admin_v2.types import bigtable_table_admin +from google.cloud.bigtable_admin_v2.types import table +from google.cloud.bigtable_admin_v2.types import table as gba_table +from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore +from google.iam.v1 import policy_pb2 as policy # type: ignore +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + +from .transports.base import BigtableTableAdminTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import BigtableTableAdminGrpcAsyncIOTransport +from .client import BigtableTableAdminClient + + +class BigtableTableAdminAsyncClient: + """Service for creating, configuring, and deleting Cloud + Bigtable tables. + + Provides access to the table schemas only, not the data stored + within the tables. + """ + + _client: BigtableTableAdminClient + + DEFAULT_ENDPOINT = BigtableTableAdminClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = BigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT + + backup_path = staticmethod(BigtableTableAdminClient.backup_path) + parse_backup_path = staticmethod(BigtableTableAdminClient.parse_backup_path) + cluster_path = staticmethod(BigtableTableAdminClient.cluster_path) + parse_cluster_path = staticmethod(BigtableTableAdminClient.parse_cluster_path) + instance_path = staticmethod(BigtableTableAdminClient.instance_path) + parse_instance_path = staticmethod(BigtableTableAdminClient.parse_instance_path) + snapshot_path = staticmethod(BigtableTableAdminClient.snapshot_path) + parse_snapshot_path = staticmethod(BigtableTableAdminClient.parse_snapshot_path) + table_path = staticmethod(BigtableTableAdminClient.table_path) + parse_table_path = staticmethod(BigtableTableAdminClient.parse_table_path) + + common_billing_account_path = staticmethod( + BigtableTableAdminClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + BigtableTableAdminClient.parse_common_billing_account_path + ) + + common_folder_path = staticmethod(BigtableTableAdminClient.common_folder_path) + parse_common_folder_path = staticmethod( + BigtableTableAdminClient.parse_common_folder_path + ) + + common_organization_path = staticmethod( + BigtableTableAdminClient.common_organization_path + ) + parse_common_organization_path = staticmethod( + BigtableTableAdminClient.parse_common_organization_path + ) + + common_project_path = staticmethod(BigtableTableAdminClient.common_project_path) + parse_common_project_path = staticmethod( + BigtableTableAdminClient.parse_common_project_path + ) + + common_location_path = staticmethod(BigtableTableAdminClient.common_location_path) + parse_common_location_path = staticmethod( + BigtableTableAdminClient.parse_common_location_path + ) + + from_service_account_info = 
BigtableTableAdminClient.from_service_account_info + from_service_account_file = BigtableTableAdminClient.from_service_account_file + from_service_account_json = from_service_account_file + + @property + def transport(self) -> BigtableTableAdminTransport: + """Return the transport used by the client instance. + + Returns: + BigtableTableAdminTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial( + type(BigtableTableAdminClient).get_transport_class, + type(BigtableTableAdminClient), + ) + + def __init__( + self, + *, + credentials: credentials.Credentials = None, + transport: Union[str, BigtableTableAdminTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the bigtable table admin client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.BigtableTableAdminTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + + self._client = BigtableTableAdminClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def create_table( + self, + request: bigtable_table_admin.CreateTableRequest = None, + *, + parent: str = None, + table_id: str = None, + table: gba_table.Table = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gba_table.Table: + r"""Creates a new table in the specified instance. + The table can be created with a full set of initial + column families, specified in the request. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.CreateTableRequest`): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] + parent (:class:`str`): + Required. The unique name of the instance in which to + create the table. Values are of the form + ``projects/{project}/instances/{instance}``. 
+ + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + table_id (:class:`str`): + Required. The name by which the new table should be + referred to within the parent instance, e.g., ``foobar`` + rather than ``{parent}/tables/foobar``. Maximum 50 + characters. + + This corresponds to the ``table_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + table (:class:`google.cloud.bigtable_admin_v2.types.Table`): + Required. The Table to create. + This corresponds to the ``table`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Table: + A collection of user data indexed by + row, column, and timestamp. Each table + is served using the resources of its + parent cluster. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, table_id, table]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_table_admin.CreateTableRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if table_id is not None: + request.table_id = table_id + if table is not None: + request.table = table + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_table, + default_timeout=300.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def create_table_from_snapshot( + self, + request: bigtable_table_admin.CreateTableFromSnapshotRequest = None, + *, + parent: str = None, + table_id: str = None, + source_snapshot: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a new table from the specified snapshot. The + target table must not exist. The snapshot and the table + must be in the same instance. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.CreateTableFromSnapshotRequest`): + The request object. 
Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot] + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + parent (:class:`str`): + Required. The unique name of the instance in which to + create the table. Values are of the form + ``projects/{project}/instances/{instance}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + table_id (:class:`str`): + Required. The name by which the new table should be + referred to within the parent instance, e.g., ``foobar`` + rather than ``{parent}/tables/foobar``. + + This corresponds to the ``table_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + source_snapshot (:class:`str`): + Required. The unique name of the snapshot from which to + restore the table. The snapshot and the table must be in + the same instance. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. + + This corresponds to the ``source_snapshot`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Table` A collection of user data indexed by row, column, and timestamp. + Each table is served using the resources of its + parent cluster. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, table_id, source_snapshot]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_table_admin.CreateTableFromSnapshotRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if table_id is not None: + request.table_id = table_id + if source_snapshot is not None: + request.source_snapshot = source_snapshot + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_table_from_snapshot, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. 
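+ # A caller typically awaits the returned future for the new table;
+ # illustrative sketch only (`client`, `parent` and `snap` are assumed
+ # names, not part of this patch):
+ #   op = await client.create_table_from_snapshot(
+ #       parent=parent, table_id="restored", source_snapshot=snap)
+ #   new_table = await op.result()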
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + table.Table, + metadata_type=bigtable_table_admin.CreateTableFromSnapshotMetadata, + ) + + # Done; return the response. + return response + + async def list_tables( + self, + request: bigtable_table_admin.ListTablesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTablesAsyncPager: + r"""Lists all tables served from a specified instance. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.ListTablesRequest`): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] + parent (:class:`str`): + Required. The unique name of the instance for which + tables should be listed. Values are of the form + ``projects/{project}/instances/{instance}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListTablesAsyncPager: + Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_table_admin.ListTablesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_tables, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListTablesAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. 
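+ # The pager issues further ListTables calls transparently as iteration
+ # crosses page boundaries; illustrative sketch only (`client` and
+ # `parent` are assumed names, not part of this patch):
+ #   pager = await client.list_tables(parent=parent)
+ #   async for tbl in pager:
+ #       ...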
+ return response + + async def get_table( + self, + request: bigtable_table_admin.GetTableRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> table.Table: + r"""Gets metadata information about the specified table. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.GetTableRequest`): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.GetTable][google.bigtable.admin.v2.BigtableTableAdmin.GetTable] + name (:class:`str`): + Required. The unique name of the requested table. Values + are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Table: + A collection of user data indexed by + row, column, and timestamp. Each table + is served using the resources of its + parent cluster. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_table_admin.GetTableRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_table, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_table( + self, + request: bigtable_table_admin.DeleteTableRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Permanently deletes a specified table and all of its + data. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.DeleteTableRequest`): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] + name (:class:`str`): + Required. The unique name of the table to be deleted. + Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_table_admin.DeleteTableRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_table, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + await rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + async def modify_column_families( + self, + request: bigtable_table_admin.ModifyColumnFamiliesRequest = None, + *, + name: str = None, + modifications: Sequence[ + bigtable_table_admin.ModifyColumnFamiliesRequest.Modification + ] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> table.Table: + r"""Performs a series of column family modifications on + the specified table. Either all or none of the + modifications will occur before this method returns, but + data requests received prior to that point may see a + table where only some modifications have taken effect. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest`): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies] + name (:class:`str`): + Required. The unique name of the table whose families + should be modified. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + modifications (:class:`Sequence[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest.Modification]`): + Required. Modifications to be + atomically applied to the specified + table's families. Entries are applied in + order, meaning that earlier + modifications can be masked by later + ones (in the case of repeated updates to + the same family, for example). + + This corresponds to the ``modifications`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Table: + A collection of user data indexed by + row, column, and timestamp. 
Each table + is served using the resources of its + parent cluster. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, modifications]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_table_admin.ModifyColumnFamiliesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + if modifications: + request.modifications.extend(modifications) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.modify_column_families, + default_timeout=300.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def drop_row_range( + self, + request: bigtable_table_admin.DropRowRangeRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Permanently drop/delete a row range from a specified + table. The request can specify whether to delete all + rows in a table, or only those that match a particular + prefix. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.DropRowRangeRequest`): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + + request = bigtable_table_admin.DropRowRangeRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.drop_row_range, + default_timeout=3600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + await rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + async def generate_consistency_token( + self, + request: bigtable_table_admin.GenerateConsistencyTokenRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable_table_admin.GenerateConsistencyTokenResponse: + r"""Generates a consistency token for a Table, which can + be used in CheckConsistency to check whether mutations + to the table that finished before this call started have + been replicated. The tokens will be available for 90 + days. 
+ + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenRequest`): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] + name (:class:`str`): + Required. The unique name of the Table for which to + create a consistency token. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenResponse: + Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_table_admin.GenerateConsistencyTokenRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.generate_consistency_token, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def check_consistency( + self, + request: bigtable_table_admin.CheckConsistencyRequest = None, + *, + name: str = None, + consistency_token: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable_table_admin.CheckConsistencyResponse: + r"""Checks replication consistency based on a consistency + token, that is, if replication has caught up based on + the conditions specified in the token and the check + request. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.CheckConsistencyRequest`): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] + name (:class:`str`): + Required. The unique name of the Table for which to + check replication consistency. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. 
+ + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + consistency_token (:class:`str`): + Required. The token created using + GenerateConsistencyToken for the Table. + + This corresponds to the ``consistency_token`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.CheckConsistencyResponse: + Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, consistency_token]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_table_admin.CheckConsistencyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + if consistency_token is not None: + request.consistency_token = consistency_token + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.check_consistency, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def snapshot_table( + self, + request: bigtable_table_admin.SnapshotTableRequest = None, + *, + name: str = None, + cluster: str = None, + snapshot_id: str = None, + description: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a new snapshot in the specified cluster from + the specified source table. The cluster and the table + must be in the same instance. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.SnapshotTableRequest`): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable] + Note: This is a private alpha release of Cloud Bigtable + snapshots. 
This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + name (:class:`str`): + Required. The unique name of the table to have the + snapshot taken. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster (:class:`str`): + Required. The name of the cluster where the snapshot + will be created in. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + + This corresponds to the ``cluster`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + snapshot_id (:class:`str`): + Required. The ID by which the new snapshot should be + referred to within the parent cluster, e.g., + ``mysnapshot`` of the form: + ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` rather than + ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/mysnapshot``. + + This corresponds to the ``snapshot_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + description (:class:`str`): + Description of the snapshot. + This corresponds to the ``description`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Snapshot` A snapshot of a table at a particular time. A snapshot can be used as a + checkpoint for data restoration or a data source for + a new table. + + Note: This is a private alpha release of Cloud + Bigtable snapshots. This feature is not currently + available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible + ways and is not recommended for production use. It is + not subject to any SLA or deprecation policy. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, cluster, snapshot_id, description]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_table_admin.SnapshotTableRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + if cluster is not None: + request.cluster = cluster + if snapshot_id is not None: + request.snapshot_id = snapshot_id + if description is not None: + request.description = description + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
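+ # Note: `default_timeout=None` below should mean no client-side deadline
+ # is applied unless the caller supplies an explicit `timeout`.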
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.snapshot_table, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + table.Snapshot, + metadata_type=bigtable_table_admin.SnapshotTableMetadata, + ) + + # Done; return the response. + return response + + async def get_snapshot( + self, + request: bigtable_table_admin.GetSnapshotRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> table.Snapshot: + r"""Gets metadata information about the specified + snapshot. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.GetSnapshotRequest`): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + name (:class:`str`): + Required. The unique name of the requested snapshot. + Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Snapshot: + A snapshot of a table at a particular + time. A snapshot can be used as a + checkpoint for data restoration or a + data source for a new table. + Note: This is a private alpha release of + Cloud Bigtable snapshots. This feature + is not currently available to most Cloud + Bigtable customers. This feature might + be changed in backward-incompatible ways + and is not recommended for production + use. It is not subject to any SLA or + deprecation policy. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + request = bigtable_table_admin.GetSnapshotRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_snapshot, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_snapshots( + self, + request: bigtable_table_admin.ListSnapshotsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListSnapshotsAsyncPager: + r"""Lists all snapshots associated with the specified + cluster. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest`): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + parent (:class:`str`): + Required. The unique name of the cluster for which + snapshots should be listed. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + Use ``{cluster} = '-'`` to list snapshots for all + clusters in an instance, e.g., + ``projects/{project}/instances/{instance}/clusters/-``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListSnapshotsAsyncPager: + Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] + + Note: This is a private alpha release of Cloud + Bigtable snapshots. This feature is not currently + available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible + ways and is not recommended for production use. It is + not subject to any SLA or deprecation policy. 
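+
+                (Typical consumption, shown here as an illustrative sketch:
+                ``async for snapshot in pager``; the pager issues follow-up
+                ``ListSnapshots`` requests as pages are exhausted.)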
+ + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_table_admin.ListSnapshotsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_snapshots, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListSnapshotsAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_snapshot( + self, + request: bigtable_table_admin.DeleteSnapshotRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Permanently deletes the specified snapshot. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.DeleteSnapshotRequest`): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot] + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + name (:class:`str`): + Required. The unique name of the snapshot to be deleted. + Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. 
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        request = bigtable_table_admin.DeleteSnapshotRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+
+        if name is not None:
+            request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.delete_snapshot,
+            default_timeout=60.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+        )
+
+        # Send the request.
+        await rpc(
+            request, retry=retry, timeout=timeout, metadata=metadata,
+        )
+
+    async def create_backup(
+        self,
+        request: bigtable_table_admin.CreateBackupRequest = None,
+        *,
+        parent: str = None,
+        backup_id: str = None,
+        backup: table.Backup = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> operation_async.AsyncOperation:
+        r"""Starts creating a new Cloud Bigtable Backup. The returned backup
+        [long-running operation][google.longrunning.Operation] can be
+        used to track creation of the backup. The
+        [metadata][google.longrunning.Operation.metadata] field type is
+        [CreateBackupMetadata][google.bigtable.admin.v2.CreateBackupMetadata].
+        The [response][google.longrunning.Operation.response] field type
+        is [Backup][google.bigtable.admin.v2.Backup], if successful.
+        Cancelling the returned operation will stop the creation and
+        delete the backup.
+
+        Args:
+            request (:class:`google.cloud.bigtable_admin_v2.types.CreateBackupRequest`):
+                The request object. The request for
+                [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup].
+            parent (:class:`str`):
+                Required. This must be one of the clusters in the
+                instance in which this table is located. The backup will
+                be stored in this cluster. Values are of the form
+                ``projects/{project}/instances/{instance}/clusters/{cluster}``.
+
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            backup_id (:class:`str`):
+                Required. The ID of the backup to be created. The
+                ``backup_id`` along with the parent ``parent`` are
+                combined as {parent}/backups/{backup_id} to create the
+                full backup name, of the form:
+                ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``.
+                This string must be between 1 and 50 characters in
+                length and match the regex ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
+
+                This corresponds to the ``backup_id`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            backup (:class:`google.cloud.bigtable_admin_v2.types.Backup`):
+                Required. The backup to create.
+                This corresponds to the ``backup`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.bigtable_admin_v2.types.Backup` A + backup of a Cloud Bigtable table. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, backup_id, backup]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_table_admin.CreateBackupRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if backup_id is not None: + request.backup_id = backup_id + if backup is not None: + request.backup = backup + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_backup, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + table.Backup, + metadata_type=bigtable_table_admin.CreateBackupMetadata, + ) + + # Done; return the response. + return response + + async def get_backup( + self, + request: bigtable_table_admin.GetBackupRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> table.Backup: + r"""Gets metadata on a pending or completed Cloud + Bigtable Backup. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.GetBackupRequest`): + The request object. The request for + [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. + name (:class:`str`): + Required. Name of the backup. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Backup: + A backup of a Cloud Bigtable table. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + request = bigtable_table_admin.GetBackupRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_backup, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def update_backup( + self, + request: bigtable_table_admin.UpdateBackupRequest = None, + *, + backup: table.Backup = None, + update_mask: field_mask.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> table.Backup: + r"""Updates a pending or completed Cloud Bigtable Backup. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.UpdateBackupRequest`): + The request object. The request for + [UpdateBackup][google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup]. + backup (:class:`google.cloud.bigtable_admin_v2.types.Backup`): + Required. The backup to update. ``backup.name``, and the + fields to be updated as specified by ``update_mask`` are + required. Other fields are ignored. Update is only + supported for the following fields: + + - ``backup.expire_time``. + + This corresponds to the ``backup`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. A mask specifying which fields (e.g. + ``expire_time``) in the Backup resource should be + updated. This mask is relative to the Backup resource, + not to the request message. The field mask must always + be specified; this prevents any future fields from being + erased accidentally by clients that do not know about + them. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Backup: + A backup of a Cloud Bigtable table. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([backup, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_table_admin.UpdateBackupRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
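+        # (Illustrative note: since only ``backup.expire_time`` is currently
+        # updatable, a typical caller passes
+        # ``update_mask=field_mask.FieldMask(paths=["expire_time"])`` so that
+        # no other Backup fields are touched.)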
+ + if backup is not None: + request.backup = backup + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_backup, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("backup.name", request.backup.name),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_backup( + self, + request: bigtable_table_admin.DeleteBackupRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a pending or completed Cloud Bigtable backup. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.DeleteBackupRequest`): + The request object. The request for + [DeleteBackup][google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup]. + name (:class:`str`): + Required. Name of the backup to delete. Values are of + the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_table_admin.DeleteBackupRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_backup, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + await rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + async def list_backups( + self, + request: bigtable_table_admin.ListBackupsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListBackupsAsyncPager: + r"""Lists Cloud Bigtable backups. Returns both completed + and pending backups. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.ListBackupsRequest`): + The request object. The request for + [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. + parent (:class:`str`): + Required. 
The cluster to list backups from. Values are + of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + Use ``{cluster} = '-'`` to list backups for all clusters + in an instance, e.g., + ``projects/{project}/instances/{instance}/clusters/-``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListBackupsAsyncPager: + The response for + [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_table_admin.ListBackupsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_backups, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListBackupsAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def restore_table( + self, + request: bigtable_table_admin.RestoreTableRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Create a new table by restoring from a completed backup. The new + table must be in the same instance as the instance containing + the backup. The returned table [long-running + operation][google.longrunning.Operation] can be used to track + the progress of the operation, and to cancel it. The + [metadata][google.longrunning.Operation.metadata] field type is + [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. + The [response][google.longrunning.Operation.response] type is + [Table][google.bigtable.admin.v2.Table], if successful. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.RestoreTableRequest`): + The request object. 
The request for + [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Table` A collection of user data indexed by row, column, and timestamp. + Each table is served using the resources of its + parent cluster. + + """ + # Create or coerce a protobuf request object. + + request = bigtable_table_admin.RestoreTableRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.restore_table, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + table.Table, + metadata_type=bigtable_table_admin.RestoreTableMetadata, + ) + + # Done; return the response. + return response + + async def get_iam_policy( + self, + request: iam_policy.GetIamPolicyRequest = None, + *, + resource: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy.Policy: + r"""Gets the access control policy for a Table or Backup + resource. Returns an empty policy if the resource exists + but does not have a policy set. + + Args: + request (:class:`google.iam.v1.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + resource (:class:`str`): + REQUIRED: The resource for which the + policy is being requested. See the + operation documentation for the + appropriate value for this field. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.iam.v1.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. It is used to + specify access control policies for Cloud Platform + resources. + + A Policy is a collection of bindings. A binding binds + one or more members to a single role. Members can be + user accounts, service accounts, Google groups, and + domains (such as G Suite). A role is a named list of + permissions (defined by IAM or configured by users). + A binding can optionally specify a condition, which + is a logic expression that further constrains the + role binding based on attributes about the request + and/or target resource. 
+
+                **JSON Example**::
+
+                    {
+                      "bindings": [
+                        {
+                          "role": "roles/resourcemanager.organizationAdmin",
+                          "members": [
+                            "user:mike@example.com",
+                            "group:admins@example.com",
+                            "domain:google.com",
+                            "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+                          ]
+                        },
+                        {
+                          "role": "roles/resourcemanager.organizationViewer",
+                          "members": ["user:eve@example.com"],
+                          "condition": {
+                            "title": "expirable access",
+                            "description": "Does not grant access after Sep 2020",
+                            "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')"
+                          }
+                        }
+                      ]
+                    }
+
+                **YAML Example**::
+
+                    bindings:
+                    - members:
+                      - user:mike@example.com
+                      - group:admins@example.com
+                      - domain:google.com
+                      - serviceAccount:my-project-id@appspot.gserviceaccount.com
+                      role: roles/resourcemanager.organizationAdmin
+                    - members:
+                      - user:eve@example.com
+                      role: roles/resourcemanager.organizationViewer
+                      condition:
+                        title: expirable access
+                        description: Does not grant access after Sep 2020
+                        expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+
+                For a description of IAM and its features, see the
+                [IAM developer's
+                guide](https://cloud.google.com/iam/docs).
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([resource])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # The request isn't a proto-plus wrapped type,
+        # so it must be constructed via keyword expansion.
+        if isinstance(request, dict):
+            request = iam_policy.GetIamPolicyRequest(**request)
+
+        elif not request:
+            request = iam_policy.GetIamPolicyRequest(resource=resource,)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.get_iam_policy,
+            default_retry=retries.Retry(
+                initial=1.0,
+                maximum=60.0,
+                multiplier=2,
+                predicate=retries.if_exception_type(
+                    exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+                ),
+            ),
+            default_timeout=60.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
+        )
+
+        # Send the request.
+        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Done; return the response.
+        return response
+
+    async def set_iam_policy(
+        self,
+        request: iam_policy.SetIamPolicyRequest = None,
+        *,
+        resource: str = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> policy.Policy:
+        r"""Sets the access control policy on a Table or Backup
+        resource. Replaces any existing policy.
+
+        Args:
+            request (:class:`google.iam.v1.iam_policy_pb2.SetIamPolicyRequest`):
+                The request object. Request message for `SetIamPolicy`
+                method.
+            resource (:class:`str`):
+                REQUIRED: The resource for which the
+                policy is being specified. See the
+                operation documentation for the
+                appropriate value for this field.
+
+                This corresponds to the ``resource`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.iam.v1.policy_pb2.Policy:
+                Defines an Identity and Access Management (IAM) policy. It is used to
+                specify access control policies for Cloud Platform
+                resources.
+
+                A Policy is a collection of bindings. A binding binds
+                one or more members to a single role. Members can be
+                user accounts, service accounts, Google groups, and
+                domains (such as G Suite). A role is a named list of
+                permissions (defined by IAM or configured by users).
+                A binding can optionally specify a condition, which
+                is a logic expression that further constrains the
+                role binding based on attributes about the request
+                and/or target resource.
+
+                **JSON Example**::
+
+                    {
+                      "bindings": [
+                        {
+                          "role": "roles/resourcemanager.organizationAdmin",
+                          "members": [
+                            "user:mike@example.com",
+                            "group:admins@example.com",
+                            "domain:google.com",
+                            "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+                          ]
+                        },
+                        {
+                          "role": "roles/resourcemanager.organizationViewer",
+                          "members": ["user:eve@example.com"],
+                          "condition": {
+                            "title": "expirable access",
+                            "description": "Does not grant access after Sep 2020",
+                            "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')"
+                          }
+                        }
+                      ]
+                    }
+
+                **YAML Example**::
+
+                    bindings:
+                    - members:
+                      - user:mike@example.com
+                      - group:admins@example.com
+                      - domain:google.com
+                      - serviceAccount:my-project-id@appspot.gserviceaccount.com
+                      role: roles/resourcemanager.organizationAdmin
+                    - members:
+                      - user:eve@example.com
+                      role: roles/resourcemanager.organizationViewer
+                      condition:
+                        title: expirable access
+                        description: Does not grant access after Sep 2020
+                        expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+
+                For a description of IAM and its features, see the
+                [IAM developer's
+                guide](https://cloud.google.com/iam/docs).
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([resource])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # The request isn't a proto-plus wrapped type,
+        # so it must be constructed via keyword expansion.
+        if isinstance(request, dict):
+            request = iam_policy.SetIamPolicyRequest(**request)
+
+        elif not request:
+            request = iam_policy.SetIamPolicyRequest(resource=resource,)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.set_iam_policy,
+            default_timeout=60.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
+        )
+
+        # Send the request.
+        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Done; return the response.
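+        # (Note: the returned ``Policy`` is a raw ``google.iam.v1.policy_pb2``
+        # protobuf message rather than a proto-plus type, mirroring the
+        # keyword-expansion request construction above.)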
+        return response
+
+    async def test_iam_permissions(
+        self,
+        request: iam_policy.TestIamPermissionsRequest = None,
+        *,
+        resource: str = None,
+        permissions: Sequence[str] = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> iam_policy.TestIamPermissionsResponse:
+        r"""Returns permissions that the caller has on the
+        specified Table or Backup resource.
+
+        Args:
+            request (:class:`google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest`):
+                The request object. Request message for
+                `TestIamPermissions` method.
+            resource (:class:`str`):
+                REQUIRED: The resource for which the
+                policy detail is being requested. See
+                the operation documentation for the
+                appropriate value for this field.
+
+                This corresponds to the ``resource`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            permissions (:class:`Sequence[str]`):
+                The set of permissions to check for the ``resource``.
+                Permissions with wildcards (such as '*' or 'storage.*')
+                are not allowed. For more information see `IAM
+                Overview <https://cloud.google.com/iam/docs/overview#permissions>`__.
+
+                This corresponds to the ``permissions`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse:
+                Response message for TestIamPermissions method.
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([resource, permissions])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # The request isn't a proto-plus wrapped type,
+        # so it must be constructed via keyword expansion.
+        if isinstance(request, dict):
+            request = iam_policy.TestIamPermissionsRequest(**request)
+
+        elif not request:
+            request = iam_policy.TestIamPermissionsRequest(
+                resource=resource, permissions=permissions,
+            )
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.test_iam_permissions,
+            default_retry=retries.Retry(
+                initial=1.0,
+                maximum=60.0,
+                multiplier=2,
+                predicate=retries.if_exception_type(
+                    exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
+                ),
+            ),
+            default_timeout=60.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
+        )
+
+        # Send the request.
+        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Done; return the response.
+ return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-bigtable-admin", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("BigtableTableAdminAsyncClient",) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py new file mode 100644 index 000000000..58eb4a9cd --- /dev/null +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -0,0 +1,2473 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import pagers +from google.cloud.bigtable_admin_v2.types import bigtable_table_admin +from google.cloud.bigtable_admin_v2.types import table +from google.cloud.bigtable_admin_v2.types import table as gba_table +from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore +from google.iam.v1 import policy_pb2 as policy # type: ignore +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + +from .transports.base import BigtableTableAdminTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import BigtableTableAdminGrpcTransport +from .transports.grpc_asyncio import BigtableTableAdminGrpcAsyncIOTransport + + +class BigtableTableAdminClientMeta(type): + """Metaclass for the BigtableTableAdmin client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + + _transport_registry = ( + OrderedDict() + ) # type: Dict[str, Type[BigtableTableAdminTransport]] + _transport_registry["grpc"] = BigtableTableAdminGrpcTransport + _transport_registry["grpc_asyncio"] = BigtableTableAdminGrpcAsyncIOTransport + + def get_transport_class( + cls, label: str = None, + ) -> Type[BigtableTableAdminTransport]: + """Return an appropriate transport class. 
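+
+        (For example, ``get_transport_class("grpc_asyncio")`` returns the
+        asyncio transport registered above, while omitting the label falls
+        back to the first registered transport, ``grpc``.)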
+
+        Args:
+            label: The name of the desired transport. If none is
+                provided, then the first transport in the registry is used.
+
+        Returns:
+            The transport class to use.
+        """
+        # If a specific transport is requested, return that one.
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class BigtableTableAdminClient(metaclass=BigtableTableAdminClientMeta):
+    """Service for creating, configuring, and deleting Cloud
+    Bigtable tables.
+
+    Provides access to the table schemas only, not the data stored
+    within the tables.
+    """
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Convert api endpoint to mTLS endpoint.
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "bigtableadmin.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            BigtableTableAdminClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_info(info)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+        file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            BigtableTableAdminClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_file(filename)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    from_service_account_json = from_service_account_file
+
+    @property
+    def transport(self) -> BigtableTableAdminTransport:
+        """Return the transport used by the client instance.
+
+        Returns:
+            BigtableTableAdminTransport: The transport used by the client instance.
+ """ + return self._transport + + @staticmethod + def backup_path(project: str, instance: str, cluster: str, backup: str,) -> str: + """Return a fully-qualified backup string.""" + return "projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}".format( + project=project, instance=instance, cluster=cluster, backup=backup, + ) + + @staticmethod + def parse_backup_path(path: str) -> Dict[str, str]: + """Parse a backup path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/instances/(?P.+?)/clusters/(?P.+?)/backups/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def cluster_path(project: str, instance: str, cluster: str,) -> str: + """Return a fully-qualified cluster string.""" + return "projects/{project}/instances/{instance}/clusters/{cluster}".format( + project=project, instance=instance, cluster=cluster, + ) + + @staticmethod + def parse_cluster_path(path: str) -> Dict[str, str]: + """Parse a cluster path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/instances/(?P.+?)/clusters/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def instance_path(project: str, instance: str,) -> str: + """Return a fully-qualified instance string.""" + return "projects/{project}/instances/{instance}".format( + project=project, instance=instance, + ) + + @staticmethod + def parse_instance_path(path: str) -> Dict[str, str]: + """Parse a instance path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/instances/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def snapshot_path(project: str, instance: str, cluster: str, snapshot: str,) -> str: + """Return a fully-qualified snapshot string.""" + return "projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}".format( + project=project, instance=instance, cluster=cluster, snapshot=snapshot, + ) + + @staticmethod + def parse_snapshot_path(path: str) -> Dict[str, str]: + """Parse a snapshot path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/instances/(?P.+?)/clusters/(?P.+?)/snapshots/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def table_path(project: str, instance: str, table: str,) -> str: + """Return a fully-qualified table string.""" + return "projects/{project}/instances/{instance}/tables/{table}".format( + project=project, instance=instance, table=table, + ) + + @staticmethod + def parse_table_path(path: str) -> Dict[str, str]: + """Parse a table path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/instances/(?P.+?)/tables/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str,) -> str: + """Return a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str, str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str,) -> str: + """Return a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder,) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str, str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return 
m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str,) -> str: + """Return a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization,) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str, str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str,) -> str: + """Return a fully-qualified project string.""" + return "projects/{project}".format(project=project,) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str, str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str,) -> str: + """Return a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format( + project=project, location=location, + ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str, str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__( + self, + *, + credentials: Optional[credentials.Credentials] = None, + transport: Union[str, BigtableTableAdminTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the bigtable table admin client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, BigtableTableAdminTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. 
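+
+        Example (an illustrative sketch only; the explicit endpoint override
+        shown here is an assumption, not something this constructor requires):
+
+        .. code-block:: python
+
+            from google.cloud import bigtable_admin_v2
+
+            # Credentials are resolved from the environment when omitted.
+            client = bigtable_admin_v2.BigtableTableAdminClient(
+                client_options={"api_endpoint": "bigtableadmin.googleapis.com"},
+            )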
+ """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + use_client_cert = bool( + util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) + ) + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + client_cert_source_func = ( + mtls.default_client_cert_source() if is_mtls else None + ) + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + api_endpoint = ( + self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT + ) + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, BigtableTableAdminTransport): + # transport is a BigtableTableAdminTransport instance. + if credentials or client_options.credentials_file: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, " + "provide its scopes directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def create_table( + self, + request: bigtable_table_admin.CreateTableRequest = None, + *, + parent: str = None, + table_id: str = None, + table: gba_table.Table = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gba_table.Table: + r"""Creates a new table in the specified instance. + The table can be created with a full set of initial + column families, specified in the request. + + Args: + request (google.cloud.bigtable_admin_v2.types.CreateTableRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] + parent (str): + Required. The unique name of the instance in which to + create the table. Values are of the form + ``projects/{project}/instances/{instance}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + table_id (str): + Required. The name by which the new table should be + referred to within the parent instance, e.g., ``foobar`` + rather than ``{parent}/tables/foobar``. Maximum 50 + characters. 
+ + This corresponds to the ``table_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + table (google.cloud.bigtable_admin_v2.types.Table): + Required. The Table to create. + This corresponds to the ``table`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Table: + A collection of user data indexed by + row, column, and timestamp. Each table + is served using the resources of its + parent cluster. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, table_id, table]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.CreateTableRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.CreateTableRequest): + request = bigtable_table_admin.CreateTableRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if table_id is not None: + request.table_id = table_id + if table is not None: + request.table = table + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_table] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def create_table_from_snapshot( + self, + request: bigtable_table_admin.CreateTableFromSnapshotRequest = None, + *, + parent: str = None, + table_id: str = None, + source_snapshot: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Creates a new table from the specified snapshot. The + target table must not exist. The snapshot and the table + must be in the same instance. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Args: + request (google.cloud.bigtable_admin_v2.types.CreateTableFromSnapshotRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot] + Note: This is a private alpha release of Cloud Bigtable + snapshots. 
This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + parent (str): + Required. The unique name of the instance in which to + create the table. Values are of the form + ``projects/{project}/instances/{instance}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + table_id (str): + Required. The name by which the new table should be + referred to within the parent instance, e.g., ``foobar`` + rather than ``{parent}/tables/foobar``. + + This corresponds to the ``table_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + source_snapshot (str): + Required. The unique name of the snapshot from which to + restore the table. The snapshot and the table must be in + the same instance. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. + + This corresponds to the ``source_snapshot`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Table` A collection of user data indexed by row, column, and timestamp. + Each table is served using the resources of its + parent cluster. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, table_id, source_snapshot]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.CreateTableFromSnapshotRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.CreateTableFromSnapshotRequest): + request = bigtable_table_admin.CreateTableFromSnapshotRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if table_id is not None: + request.table_id = table_id + if source_snapshot is not None: + request.source_snapshot = source_snapshot + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.create_table_from_snapshot + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. 
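+        # A usage sketch (hypothetical resource names): the wrapped future
+        # below resolves to the new Table once the restore completes.
+        #
+        #   op = client.create_table_from_snapshot(
+        #       parent="projects/my-project/instances/my-instance",
+        #       table_id="my-table",
+        #       source_snapshot="projects/my-project/instances/my-instance/clusters/my-cluster/snapshots/my-snapshot",
+        #   )
+        #   new_table = op.result()  # blocks until the operation finishes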
+ response = operation.from_gapic( + response, + self._transport.operations_client, + table.Table, + metadata_type=bigtable_table_admin.CreateTableFromSnapshotMetadata, + ) + + # Done; return the response. + return response + + def list_tables( + self, + request: bigtable_table_admin.ListTablesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTablesPager: + r"""Lists all tables served from a specified instance. + + Args: + request (google.cloud.bigtable_admin_v2.types.ListTablesRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] + parent (str): + Required. The unique name of the instance for which + tables should be listed. Values are of the form + ``projects/{project}/instances/{instance}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListTablesPager: + Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.ListTablesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.ListTablesRequest): + request = bigtable_table_admin.ListTablesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_tables] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListTablesPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def get_table( + self, + request: bigtable_table_admin.GetTableRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> table.Table: + r"""Gets metadata information about the specified table. + + Args: + request (google.cloud.bigtable_admin_v2.types.GetTableRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.GetTable][google.bigtable.admin.v2.BigtableTableAdmin.GetTable] + name (str): + Required. The unique name of the requested table. Values + are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Table: + A collection of user data indexed by + row, column, and timestamp. Each table + is served using the resources of its + parent cluster. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.GetTableRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.GetTableRequest): + request = bigtable_table_admin.GetTableRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_table] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def delete_table( + self, + request: bigtable_table_admin.DeleteTableRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Permanently deletes a specified table and all of its + data. + + Args: + request (google.cloud.bigtable_admin_v2.types.DeleteTableRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] + name (str): + Required. The unique name of the table to be deleted. + Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.DeleteTableRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.DeleteTableRequest): + request = bigtable_table_admin.DeleteTableRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_table] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + def modify_column_families( + self, + request: bigtable_table_admin.ModifyColumnFamiliesRequest = None, + *, + name: str = None, + modifications: Sequence[ + bigtable_table_admin.ModifyColumnFamiliesRequest.Modification + ] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> table.Table: + r"""Performs a series of column family modifications on + the specified table. Either all or none of the + modifications will occur before this method returns, but + data requests received prior to that point may see a + table where only some modifications have taken effect. + + Args: + request (google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies] + name (str): + Required. The unique name of the table whose families + should be modified. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + modifications (Sequence[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest.Modification]): + Required. Modifications to be + atomically applied to the specified + table's families. Entries are applied in + order, meaning that earlier + modifications can be masked by later + ones (in the case of repeated updates to + the same family, for example). + + This corresponds to the ``modifications`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Table: + A collection of user data indexed by + row, column, and timestamp. Each table + is served using the resources of its + parent cluster. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, modifications]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.ModifyColumnFamiliesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.ModifyColumnFamiliesRequest): + request = bigtable_table_admin.ModifyColumnFamiliesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + if modifications: + request.modifications.extend(modifications) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.modify_column_families] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def drop_row_range( + self, + request: bigtable_table_admin.DropRowRangeRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Permanently drop/delete a row range from a specified + table. The request can specify whether to delete all + rows in a table, or only those that match a particular + prefix. + + Args: + request (google.cloud.bigtable_admin_v2.types.DropRowRangeRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.DropRowRangeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.DropRowRangeRequest): + request = bigtable_table_admin.DropRowRangeRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.drop_row_range] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. 
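+        # This RPC returns google.protobuf.Empty, so nothing is handed back to
+        # the caller; a usage sketch (hypothetical names, request passed as a
+        # dict since this method has no flattened fields):
+        #
+        #   client.drop_row_range({
+        #       "name": "projects/my-project/instances/my-instance/tables/my-table",
+        #       "row_key_prefix": b"user#",
+        #   })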
+ rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + def generate_consistency_token( + self, + request: bigtable_table_admin.GenerateConsistencyTokenRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable_table_admin.GenerateConsistencyTokenResponse: + r"""Generates a consistency token for a Table, which can + be used in CheckConsistency to check whether mutations + to the table that finished before this call started have + been replicated. The tokens will be available for 90 + days. + + Args: + request (google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] + name (str): + Required. The unique name of the Table for which to + create a consistency token. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenResponse: + Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.GenerateConsistencyTokenRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, bigtable_table_admin.GenerateConsistencyTokenRequest + ): + request = bigtable_table_admin.GenerateConsistencyTokenRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.generate_consistency_token + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
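+        # Usage sketch (hypothetical table name): the returned token is
+        # typically handed straight to check_consistency.
+        #
+        #   token = client.generate_consistency_token(
+        #       name="projects/my-project/instances/my-instance/tables/my-table"
+        #   ).consistency_token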
+ return response + + def check_consistency( + self, + request: bigtable_table_admin.CheckConsistencyRequest = None, + *, + name: str = None, + consistency_token: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable_table_admin.CheckConsistencyResponse: + r"""Checks replication consistency based on a consistency + token, that is, if replication has caught up based on + the conditions specified in the token and the check + request. + + Args: + request (google.cloud.bigtable_admin_v2.types.CheckConsistencyRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] + name (str): + Required. The unique name of the Table for which to + check replication consistency. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + consistency_token (str): + Required. The token created using + GenerateConsistencyToken for the Table. + + This corresponds to the ``consistency_token`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.CheckConsistencyResponse: + Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, consistency_token]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.CheckConsistencyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.CheckConsistencyRequest): + request = bigtable_table_admin.CheckConsistencyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + if consistency_token is not None: + request.consistency_token = consistency_token + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.check_consistency] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
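+        # Usage sketch (hypothetical names): callers usually poll this method
+        # until the response reports consistency.
+        #
+        #   while not client.check_consistency(
+        #       name=table_name, consistency_token=token
+        #   ).consistent:
+        #       time.sleep(1)  # back off between polls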
+        return response
+
+    def snapshot_table(
+        self,
+        request: bigtable_table_admin.SnapshotTableRequest = None,
+        *,
+        name: str = None,
+        cluster: str = None,
+        snapshot_id: str = None,
+        description: str = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> operation.Operation:
+        r"""Creates a new snapshot in the specified cluster from
+        the specified source table. The cluster and the table
+        must be in the same instance.
+        Note: This is a private alpha release of Cloud Bigtable
+        snapshots. This feature is not currently available to
+        most Cloud Bigtable customers. This feature might be
+        changed in backward-incompatible ways and is not
+        recommended for production use. It is not subject to any
+        SLA or deprecation policy.
+
+        Args:
+            request (google.cloud.bigtable_admin_v2.types.SnapshotTableRequest):
+                The request object. Request message for
+                [google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable]
+                Note: This is a private alpha release of Cloud Bigtable
+                snapshots. This feature is not currently available to
+                most Cloud Bigtable customers. This feature might be
+                changed in backward-incompatible ways and is not
+                recommended for production use. It is not subject to any
+                SLA or deprecation policy.
+            name (str):
+                Required. The unique name of the table to have the
+                snapshot taken. Values are of the form
+                ``projects/{project}/instances/{instance}/tables/{table}``.
+
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            cluster (str):
+                Required. The name of the cluster in which the snapshot
+                will be created. Values are of the form
+                ``projects/{project}/instances/{instance}/clusters/{cluster}``.
+
+                This corresponds to the ``cluster`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            snapshot_id (str):
+                Required. The ID by which the new snapshot should be
+                referred to within the parent cluster, e.g.,
+                ``mysnapshot`` of the form:
+                ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` rather than
+                ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/mysnapshot``.
+
+                This corresponds to the ``snapshot_id`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            description (str):
+                Description of the snapshot.
+                This corresponds to the ``description`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.api_core.operation.Operation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Snapshot` A snapshot of a table at a particular time. A snapshot can be used as a
+                checkpoint for data restoration or a data source for
+                a new table.
+
+                Note: This is a private alpha release of Cloud
+                Bigtable snapshots. This feature is not currently
+                available to most Cloud Bigtable customers. This
+                feature might be changed in backward-incompatible
+                ways and is not recommended for production use. It is
+                not subject to any SLA or deprecation policy.
+
+        """
+        # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, cluster, snapshot_id, description]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.SnapshotTableRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.SnapshotTableRequest): + request = bigtable_table_admin.SnapshotTableRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + if cluster is not None: + request.cluster = cluster + if snapshot_id is not None: + request.snapshot_id = snapshot_id + if description is not None: + request.description = description + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.snapshot_table] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + table.Snapshot, + metadata_type=bigtable_table_admin.SnapshotTableMetadata, + ) + + # Done; return the response. + return response + + def get_snapshot( + self, + request: bigtable_table_admin.GetSnapshotRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> table.Snapshot: + r"""Gets metadata information about the specified + snapshot. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Args: + request (google.cloud.bigtable_admin_v2.types.GetSnapshotRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + name (str): + Required. The unique name of the requested snapshot. + Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.bigtable_admin_v2.types.Snapshot: + A snapshot of a table at a particular + time. A snapshot can be used as a + checkpoint for data restoration or a + data source for a new table. + Note: This is a private alpha release of + Cloud Bigtable snapshots. This feature + is not currently available to most Cloud + Bigtable customers. This feature might + be changed in backward-incompatible ways + and is not recommended for production + use. It is not subject to any SLA or + deprecation policy. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.GetSnapshotRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.GetSnapshotRequest): + request = bigtable_table_admin.GetSnapshotRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_snapshot] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_snapshots( + self, + request: bigtable_table_admin.ListSnapshotsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListSnapshotsPager: + r"""Lists all snapshots associated with the specified + cluster. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Args: + request (google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + parent (str): + Required. The unique name of the cluster for which + snapshots should be listed. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + Use ``{cluster} = '-'`` to list snapshots for all + clusters in an instance, e.g., + ``projects/{project}/instances/{instance}/clusters/-``. 
+ + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListSnapshotsPager: + Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] + + Note: This is a private alpha release of Cloud + Bigtable snapshots. This feature is not currently + available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible + ways and is not recommended for production use. It is + not subject to any SLA or deprecation policy. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.ListSnapshotsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.ListSnapshotsRequest): + request = bigtable_table_admin.ListSnapshotsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_snapshots] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListSnapshotsPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_snapshot( + self, + request: bigtable_table_admin.DeleteSnapshotRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Permanently deletes the specified snapshot. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Args: + request (google.cloud.bigtable_admin_v2.types.DeleteSnapshotRequest): + The request object. 
Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot] + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + name (str): + Required. The unique name of the snapshot to be deleted. + Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.DeleteSnapshotRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.DeleteSnapshotRequest): + request = bigtable_table_admin.DeleteSnapshotRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_snapshot] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + def create_backup( + self, + request: bigtable_table_admin.CreateBackupRequest = None, + *, + parent: str = None, + backup_id: str = None, + backup: table.Backup = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Starts creating a new Cloud Bigtable Backup. The returned backup + [long-running operation][google.longrunning.Operation] can be + used to track creation of the backup. The + [metadata][google.longrunning.Operation.metadata] field type is + [CreateBackupMetadata][google.bigtable.admin.v2.CreateBackupMetadata]. + The [response][google.longrunning.Operation.response] field type + is [Backup][google.bigtable.admin.v2.Backup], if successful. + Cancelling the returned operation will stop the creation and + delete the backup. + + Args: + request (google.cloud.bigtable_admin_v2.types.CreateBackupRequest): + The request object. The request for + [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup]. + parent (str): + Required. This must be one of the clusters in the + instance in which this table is located. 
The backup will
+                be stored in this cluster. Values are of the form
+                ``projects/{project}/instances/{instance}/clusters/{cluster}``.
+
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            backup_id (str):
+                Required. The id of the backup to be created. The
+                ``backup_id`` along with the ``parent`` are
+                combined as ``{parent}/backups/{backup_id}`` to create the
+                full backup name, of the form:
+                ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``.
+                This string must be between 1 and 50 characters in
+                length and match the regex ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
+
+                This corresponds to the ``backup_id`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            backup (google.cloud.bigtable_admin_v2.types.Backup):
+                Required. The backup to create.
+                This corresponds to the ``backup`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.api_core.operation.Operation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be
+                :class:`google.cloud.bigtable_admin_v2.types.Backup` A
+                backup of a Cloud Bigtable table.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([parent, backup_id, backup])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a bigtable_table_admin.CreateBackupRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, bigtable_table_admin.CreateBackupRequest):
+            request = bigtable_table_admin.CreateBackupRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+
+        if parent is not None:
+            request.parent = parent
+        if backup_id is not None:
+            request.backup_id = backup_id
+        if backup is not None:
+            request.backup = backup
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.create_backup]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+        )
+
+        # Send the request.
+        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Wrap the response in an operation future.
+        response = operation.from_gapic(
+            response,
+            self._transport.operations_client,
+            table.Backup,
+            metadata_type=bigtable_table_admin.CreateBackupMetadata,
+        )
+
+        # Done; return the response.
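+        # As the docstring above notes, ``response.metadata`` carries
+        # CreateBackupMetadata, ``response.result()`` yields the finished
+        # Backup, and cancelling the operation stops creation and deletes the
+        # partial backup.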
+ return response + + def get_backup( + self, + request: bigtable_table_admin.GetBackupRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> table.Backup: + r"""Gets metadata on a pending or completed Cloud + Bigtable Backup. + + Args: + request (google.cloud.bigtable_admin_v2.types.GetBackupRequest): + The request object. The request for + [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. + name (str): + Required. Name of the backup. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Backup: + A backup of a Cloud Bigtable table. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.GetBackupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.GetBackupRequest): + request = bigtable_table_admin.GetBackupRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_backup] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def update_backup( + self, + request: bigtable_table_admin.UpdateBackupRequest = None, + *, + backup: table.Backup = None, + update_mask: field_mask.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> table.Backup: + r"""Updates a pending or completed Cloud Bigtable Backup. + + Args: + request (google.cloud.bigtable_admin_v2.types.UpdateBackupRequest): + The request object. The request for + [UpdateBackup][google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup]. + backup (google.cloud.bigtable_admin_v2.types.Backup): + Required. The backup to update. ``backup.name``, and the + fields to be updated as specified by ``update_mask`` are + required. Other fields are ignored. Update is only + supported for the following fields: + + - ``backup.expire_time``. + + This corresponds to the ``backup`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. A mask specifying which fields (e.g. + ``expire_time``) in the Backup resource should be + updated. This mask is relative to the Backup resource, + not to the request message. The field mask must always + be specified; this prevents any future fields from being + erased accidentally by clients that do not know about + them. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Backup: + A backup of a Cloud Bigtable table. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([backup, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.UpdateBackupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.UpdateBackupRequest): + request = bigtable_table_admin.UpdateBackupRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if backup is not None: + request.backup = backup + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_backup] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("backup.name", request.backup.name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def delete_backup( + self, + request: bigtable_table_admin.DeleteBackupRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a pending or completed Cloud Bigtable backup. + + Args: + request (google.cloud.bigtable_admin_v2.types.DeleteBackupRequest): + The request object. The request for + [DeleteBackup][google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup]. + name (str): + Required. Name of the backup to delete. Values are of + the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.DeleteBackupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.DeleteBackupRequest): + request = bigtable_table_admin.DeleteBackupRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_backup] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + def list_backups( + self, + request: bigtable_table_admin.ListBackupsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListBackupsPager: + r"""Lists Cloud Bigtable backups. Returns both completed + and pending backups. + + Args: + request (google.cloud.bigtable_admin_v2.types.ListBackupsRequest): + The request object. The request for + [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. + parent (str): + Required. The cluster to list backups from. Values are + of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + Use ``{cluster} = '-'`` to list backups for all clusters + in an instance, e.g., + ``projects/{project}/instances/{instance}/clusters/-``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListBackupsPager: + The response for + [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.ListBackupsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+        if not isinstance(request, bigtable_table_admin.ListBackupsRequest):
+            request = bigtable_table_admin.ListBackupsRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+
+        if parent is not None:
+            request.parent = parent
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.list_backups]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+        )
+
+        # Send the request.
+        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # This method is paged; wrap the response in a pager, which provides
+        # an `__iter__` convenience method.
+        response = pagers.ListBackupsPager(
+            method=rpc, request=request, response=response, metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def restore_table(
+        self,
+        request: bigtable_table_admin.RestoreTableRequest = None,
+        *,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> operation.Operation:
+        r"""Creates a new table by restoring from a completed backup. The new
+        table must be in the same instance as the instance containing
+        the backup. The returned table [long-running
+        operation][google.longrunning.Operation] can be used to track
+        the progress of the operation, and to cancel it. The
+        [metadata][google.longrunning.Operation.metadata] field type is
+        [RestoreTableMetadata][google.bigtable.admin.v2.RestoreTableMetadata].
+        The [response][google.longrunning.Operation.response] type is
+        [Table][google.bigtable.admin.v2.Table], if successful.
+
+        Args:
+            request (google.cloud.bigtable_admin_v2.types.RestoreTableRequest):
+                The request object. The request for
+                [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable].
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.api_core.operation.Operation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Table` A collection of user data indexed by row, column, and timestamp.
+                Each table is served using the resources of its
+                parent cluster.
+
+        """
+        # Create or coerce a protobuf request object.
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a bigtable_table_admin.RestoreTableRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, bigtable_table_admin.RestoreTableRequest):
+            request = bigtable_table_admin.RestoreTableRequest(request)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.restore_table]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+        )
+
+        # Send the request.
+        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Wrap the response in an operation future.
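+        # The wrapped future below exposes RestoreTableMetadata via
+        # ``response.metadata`` and yields the restored Table from
+        # ``response.result()``; a sketch (hypothetical request values):
+        #
+        #   op = client.restore_table({
+        #       "parent": "projects/my-project/instances/my-instance",
+        #       "table_id": "restored-table",
+        #       "backup": "projects/my-project/instances/my-instance/clusters/my-cluster/backups/my-backup",
+        #   })
+        #   restored = op.result()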
+        response = operation.from_gapic(
+            response,
+            self._transport.operations_client,
+            table.Table,
+            metadata_type=bigtable_table_admin.RestoreTableMetadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def get_iam_policy(
+        self,
+        request: iam_policy.GetIamPolicyRequest = None,
+        *,
+        resource: str = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> policy.Policy:
+        r"""Gets the access control policy for a Table or Backup
+        resource. Returns an empty policy if the resource exists
+        but does not have a policy set.
+
+        Args:
+            request (google.iam.v1.iam_policy_pb2.GetIamPolicyRequest):
+                The request object. Request message for `GetIamPolicy`
+                method.
+            resource (str):
+                REQUIRED: The resource for which the
+                policy is being requested. See the
+                operation documentation for the
+                appropriate value for this field.
+
+                This corresponds to the ``resource`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.iam.v1.policy_pb2.Policy:
+                Defines an Identity and Access Management (IAM) policy. It is used to
+                specify access control policies for Cloud Platform
+                resources.
+
+                A Policy is a collection of bindings. A binding binds
+                one or more members to a single role. Members can be
+                user accounts, service accounts, Google groups, and
+                domains (such as G Suite). A role is a named list of
+                permissions (defined by IAM or configured by users).
+                A binding can optionally specify a condition, which
+                is a logic expression that further constrains the
+                role binding based on attributes about the request
+                and/or target resource.
+
+                **JSON Example**::
+
+                    {
+                      "bindings": [
+                        {
+                          "role": "roles/resourcemanager.organizationAdmin",
+                          "members": [
+                            "user:mike@example.com",
+                            "group:admins@example.com",
+                            "domain:google.com",
+                            "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+                          ]
+                        },
+                        {
+                          "role": "roles/resourcemanager.organizationViewer",
+                          "members": ["user:eve@example.com"],
+                          "condition": {
+                            "title": "expirable access",
+                            "description": "Does not grant access after Sep 2020",
+                            "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')"
+                          }
+                        }
+                      ]
+                    }
+
+                **YAML Example**::
+
+                    bindings:
+                    - members:
+                      - user:mike@example.com
+                      - group:admins@example.com
+                      - domain:google.com
+                      - serviceAccount:my-project-id@appspot.gserviceaccount.com
+                      role: roles/resourcemanager.organizationAdmin
+                    - members:
+                      - user:eve@example.com
+                      role: roles/resourcemanager.organizationViewer
+                      condition:
+                        title: expirable access
+                        description: Does not grant access after Sep 2020
+                        expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+
+                For a description of IAM and its features, see the
+                `IAM developer's guide <https://cloud.google.com/iam/docs>`__.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([resource])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+ ) + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy.GetIamPolicyRequest(**request) + + elif not request: + request = iam_policy.GetIamPolicyRequest(resource=resource,) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_iam_policy] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def set_iam_policy( + self, + request: iam_policy.SetIamPolicyRequest = None, + *, + resource: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy.Policy: + r"""Sets the access control policy on a Table or Backup + resource. Replaces any existing policy. + + Args: + request (google.iam.v1.iam_policy_pb2.SetIamPolicyRequest): + The request object. Request message for `SetIamPolicy` + method. + resource (str): + REQUIRED: The resource for which the + policy is being specified. See the + operation documentation for the + appropriate value for this field. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.iam.v1.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. It is used to + specify access control policies for Cloud Platform + resources. + + A Policy is a collection of bindings. A binding binds + one or more members to a single role. Members can be + user accounts, service accounts, Google groups, and + domains (such as G Suite). A role is a named list of + permissions (defined by IAM or configured by users). + A binding can optionally specify a condition, which + is a logic expression that further constrains the + role binding based on attributes about the request + and/or target resource. 
+
+                **JSON Example**::
+
+                    {
+                      "bindings": [
+                        {
+                          "role": "roles/resourcemanager.organizationAdmin",
+                          "members": [
+                            "user:mike@example.com",
+                            "group:admins@example.com",
+                            "domain:google.com",
+                            "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+                          ]
+                        },
+                        {
+                          "role": "roles/resourcemanager.organizationViewer",
+                          "members": ["user:eve@example.com"],
+                          "condition": {
+                            "title": "expirable access",
+                            "description": "Does not grant access after Sep 2020",
+                            "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')"
+                          }
+                        }
+                      ]
+                    }
+
+                **YAML Example**::
+
+                    bindings:
+                    - members:
+                      - user:mike@example.com
+                      - group:admins@example.com
+                      - domain:google.com
+                      - serviceAccount:my-project-id@appspot.gserviceaccount.com
+                      role: roles/resourcemanager.organizationAdmin
+                    - members:
+                      - user:eve@example.com
+                      role: roles/resourcemanager.organizationViewer
+                      condition:
+                        title: expirable access
+                        description: Does not grant access after Sep 2020
+                        expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+
+                For a description of IAM and its features, see the
+                `IAM developer's guide <https://cloud.google.com/iam/docs>`__.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([resource])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # The request isn't a proto-plus wrapped type,
+        # so it must be constructed via keyword expansion.
+        if isinstance(request, dict):
+            request = iam_policy.SetIamPolicyRequest(**request)
+
+        elif not request:
+            request = iam_policy.SetIamPolicyRequest(resource=resource,)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.set_iam_policy]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
+        )
+
+        # Send the request.
+        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Done; return the response.
+        return response
+
+    def test_iam_permissions(
+        self,
+        request: iam_policy.TestIamPermissionsRequest = None,
+        *,
+        resource: str = None,
+        permissions: Sequence[str] = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> iam_policy.TestIamPermissionsResponse:
+        r"""Returns permissions that the caller has on the
+        specified Table or Backup resource.
+
+        Args:
+            request (google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest):
+                The request object. Request message for
+                `TestIamPermissions` method.
+            resource (str):
+                REQUIRED: The resource for which the
+                policy detail is being requested. See
+                the operation documentation for the
+                appropriate value for this field.
+
+                This corresponds to the ``resource`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            permissions (Sequence[str]):
+                The set of permissions to check for the ``resource``.
+                Permissions with wildcards (such as '*' or 'storage.*')
+                are not allowed. For more information see `IAM
+                Overview <https://cloud.google.com/iam/docs/overview#permissions>`__.
+ + This corresponds to the ``permissions`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse: + Response message for TestIamPermissions method. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([resource, permissions]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy.TestIamPermissionsRequest(**request) + + elif not request: + request = iam_policy.TestIamPermissionsRequest( + resource=resource, permissions=permissions, + ) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.test_iam_permissions] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-bigtable-admin", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("BigtableTableAdminClient",) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py new file mode 100644 index 000000000..be7c121d7 --- /dev/null +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py @@ -0,0 +1,405 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple + +from google.cloud.bigtable_admin_v2.types import bigtable_table_admin +from google.cloud.bigtable_admin_v2.types import table + + +class ListTablesPager: + """A pager for iterating through ``list_tables`` requests. + + This class thinly wraps an initial + :class:`google.cloud.bigtable_admin_v2.types.ListTablesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``tables`` field. 
+ + If there are more pages, the ``__iter__`` method will make additional + ``ListTables`` requests and continue to iterate + through the ``tables`` field on the + corresponding responses. + + All the usual :class:`google.cloud.bigtable_admin_v2.types.ListTablesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., bigtable_table_admin.ListTablesResponse], + request: bigtable_table_admin.ListTablesRequest, + response: bigtable_table_admin.ListTablesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.bigtable_admin_v2.types.ListTablesRequest): + The initial request object. + response (google.cloud.bigtable_admin_v2.types.ListTablesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = bigtable_table_admin.ListTablesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[bigtable_table_admin.ListTablesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[table.Table]: + for page in self.pages: + yield from page.tables + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListTablesAsyncPager: + """A pager for iterating through ``list_tables`` requests. + + This class thinly wraps an initial + :class:`google.cloud.bigtable_admin_v2.types.ListTablesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``tables`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListTables`` requests and continue to iterate + through the ``tables`` field on the + corresponding responses. + + All the usual :class:`google.cloud.bigtable_admin_v2.types.ListTablesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[bigtable_table_admin.ListTablesResponse]], + request: bigtable_table_admin.ListTablesRequest, + response: bigtable_table_admin.ListTablesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.bigtable_admin_v2.types.ListTablesRequest): + The initial request object. + response (google.cloud.bigtable_admin_v2.types.ListTablesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = bigtable_table_admin.ListTablesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[bigtable_table_admin.ListTablesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[table.Table]: + async def async_generator(): + async for page in self.pages: + for response in page.tables: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListSnapshotsPager: + """A pager for iterating through ``list_snapshots`` requests. + + This class thinly wraps an initial + :class:`google.cloud.bigtable_admin_v2.types.ListSnapshotsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``snapshots`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListSnapshots`` requests and continue to iterate + through the ``snapshots`` field on the + corresponding responses. + + All the usual :class:`google.cloud.bigtable_admin_v2.types.ListSnapshotsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., bigtable_table_admin.ListSnapshotsResponse], + request: bigtable_table_admin.ListSnapshotsRequest, + response: bigtable_table_admin.ListSnapshotsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest): + The initial request object. + response (google.cloud.bigtable_admin_v2.types.ListSnapshotsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = bigtable_table_admin.ListSnapshotsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[bigtable_table_admin.ListSnapshotsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[table.Snapshot]: + for page in self.pages: + yield from page.snapshots + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListSnapshotsAsyncPager: + """A pager for iterating through ``list_snapshots`` requests. + + This class thinly wraps an initial + :class:`google.cloud.bigtable_admin_v2.types.ListSnapshotsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``snapshots`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListSnapshots`` requests and continue to iterate + through the ``snapshots`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.bigtable_admin_v2.types.ListSnapshotsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[bigtable_table_admin.ListSnapshotsResponse]], + request: bigtable_table_admin.ListSnapshotsRequest, + response: bigtable_table_admin.ListSnapshotsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest): + The initial request object. + response (google.cloud.bigtable_admin_v2.types.ListSnapshotsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = bigtable_table_admin.ListSnapshotsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[bigtable_table_admin.ListSnapshotsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[table.Snapshot]: + async def async_generator(): + async for page in self.pages: + for response in page.snapshots: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListBackupsPager: + """A pager for iterating through ``list_backups`` requests. + + This class thinly wraps an initial + :class:`google.cloud.bigtable_admin_v2.types.ListBackupsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``backups`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListBackups`` requests and continue to iterate + through the ``backups`` field on the + corresponding responses. + + All the usual :class:`google.cloud.bigtable_admin_v2.types.ListBackupsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., bigtable_table_admin.ListBackupsResponse], + request: bigtable_table_admin.ListBackupsRequest, + response: bigtable_table_admin.ListBackupsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.bigtable_admin_v2.types.ListBackupsRequest): + The initial request object. + response (google.cloud.bigtable_admin_v2.types.ListBackupsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = bigtable_table_admin.ListBackupsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[bigtable_table_admin.ListBackupsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[table.Backup]: + for page in self.pages: + yield from page.backups + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListBackupsAsyncPager: + """A pager for iterating through ``list_backups`` requests. + + This class thinly wraps an initial + :class:`google.cloud.bigtable_admin_v2.types.ListBackupsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``backups`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListBackups`` requests and continue to iterate + through the ``backups`` field on the + corresponding responses. + + All the usual :class:`google.cloud.bigtable_admin_v2.types.ListBackupsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[bigtable_table_admin.ListBackupsResponse]], + request: bigtable_table_admin.ListBackupsRequest, + response: bigtable_table_admin.ListBackupsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.bigtable_admin_v2.types.ListBackupsRequest): + The initial request object. + response (google.cloud.bigtable_admin_v2.types.ListBackupsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = bigtable_table_admin.ListBackupsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[bigtable_table_admin.ListBackupsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[table.Backup]: + async def async_generator(): + async for page in self.pages: + for response in page.backups: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py new file mode 100644 index 000000000..8e9ae114d --- /dev/null +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +from typing import Dict, Type + +from .base import BigtableTableAdminTransport +from .grpc import BigtableTableAdminGrpcTransport +from .grpc_asyncio import BigtableTableAdminGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = ( + OrderedDict() +) # type: Dict[str, Type[BigtableTableAdminTransport]] +_transport_registry["grpc"] = BigtableTableAdminGrpcTransport +_transport_registry["grpc_asyncio"] = BigtableTableAdminGrpcAsyncIOTransport + +__all__ = ( + "BigtableTableAdminTransport", + "BigtableTableAdminGrpcTransport", + "BigtableTableAdminGrpcAsyncIOTransport", +) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py new file mode 100644 index 000000000..b54025c94 --- /dev/null +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py @@ -0,0 +1,517 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+
+import abc
+import typing
+import pkg_resources
+
+from google import auth  # type: ignore
+from google.api_core import exceptions  # type: ignore
+from google.api_core import gapic_v1  # type: ignore
+from google.api_core import retry as retries  # type: ignore
+from google.api_core import operations_v1  # type: ignore
+from google.auth import credentials  # type: ignore
+
+from google.cloud.bigtable_admin_v2.types import bigtable_table_admin
+from google.cloud.bigtable_admin_v2.types import table
+from google.cloud.bigtable_admin_v2.types import table as gba_table
+from google.iam.v1 import iam_policy_pb2 as iam_policy  # type: ignore
+from google.iam.v1 import policy_pb2 as policy  # type: ignore
+from google.longrunning import operations_pb2 as operations  # type: ignore
+from google.protobuf import empty_pb2 as empty  # type: ignore
+
+
+try:
+    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+        gapic_version=pkg_resources.get_distribution(
+            "google-cloud-bigtable-admin",
+        ).version,
+    )
+except pkg_resources.DistributionNotFound:
+    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
+class BigtableTableAdminTransport(abc.ABC):
+    """Abstract transport class for BigtableTableAdmin."""
+
+    AUTH_SCOPES = (
+        "https://www.googleapis.com/auth/bigtable.admin",
+        "https://www.googleapis.com/auth/bigtable.admin.table",
+        "https://www.googleapis.com/auth/cloud-bigtable.admin",
+        "https://www.googleapis.com/auth/cloud-bigtable.admin.table",
+        "https://www.googleapis.com/auth/cloud-platform",
+        "https://www.googleapis.com/auth/cloud-platform.read-only",
+    )
+
+    def __init__(
+        self,
+        *,
+        host: str = "bigtableadmin.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: typing.Optional[str] = None,
+        scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES,
+        quota_project_id: typing.Optional[str] = None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+        **kwargs,
+    ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]): The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): A list of scopes.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+        """
+        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
+        if ":" not in host:
+            host += ":443"
+        self._host = host
+
+        # If no credentials are provided, then determine the appropriate
+        # defaults.
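+        # Sketch of the precedence implemented below, shown on a concrete
+        # subclass (illustrative only; "key.json" is a hypothetical file):
+        #
+        #   BigtableTableAdminGrpcTransport(credentials=creds)            # used as-is
+        #   BigtableTableAdminGrpcTransport(credentials_file="key.json")  # loaded from file
+        #   BigtableTableAdminGrpcTransport()                             # application default credentials
+        #
+        # Passing both raises exceptions.DuplicateCredentialArgs.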
+ if credentials and credentials_file: + raise exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = auth.load_credentials_from_file( + credentials_file, scopes=scopes, quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = auth.default( + scopes=scopes, quota_project_id=quota_project_id + ) + + # Save the credentials. + self._credentials = credentials + + # Lifted into its own function so it can be stubbed out during tests. + self._prep_wrapped_messages(client_info) + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.create_table: gapic_v1.method.wrap_method( + self.create_table, default_timeout=300.0, client_info=client_info, + ), + self.create_table_from_snapshot: gapic_v1.method.wrap_method( + self.create_table_from_snapshot, + default_timeout=None, + client_info=client_info, + ), + self.list_tables: gapic_v1.method.wrap_method( + self.list_tables, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.get_table: gapic_v1.method.wrap_method( + self.get_table, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.delete_table: gapic_v1.method.wrap_method( + self.delete_table, default_timeout=60.0, client_info=client_info, + ), + self.modify_column_families: gapic_v1.method.wrap_method( + self.modify_column_families, + default_timeout=300.0, + client_info=client_info, + ), + self.drop_row_range: gapic_v1.method.wrap_method( + self.drop_row_range, default_timeout=3600.0, client_info=client_info, + ), + self.generate_consistency_token: gapic_v1.method.wrap_method( + self.generate_consistency_token, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.check_consistency: gapic_v1.method.wrap_method( + self.check_consistency, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.snapshot_table: gapic_v1.method.wrap_method( + self.snapshot_table, default_timeout=None, client_info=client_info, + ), + self.get_snapshot: gapic_v1.method.wrap_method( + self.get_snapshot, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_snapshots: gapic_v1.method.wrap_method( + self.list_snapshots, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.delete_snapshot: gapic_v1.method.wrap_method( + self.delete_snapshot, 
default_timeout=60.0, client_info=client_info, + ), + self.create_backup: gapic_v1.method.wrap_method( + self.create_backup, default_timeout=None, client_info=client_info, + ), + self.get_backup: gapic_v1.method.wrap_method( + self.get_backup, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.update_backup: gapic_v1.method.wrap_method( + self.update_backup, default_timeout=60.0, client_info=client_info, + ), + self.delete_backup: gapic_v1.method.wrap_method( + self.delete_backup, default_timeout=60.0, client_info=client_info, + ), + self.list_backups: gapic_v1.method.wrap_method( + self.list_backups, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.restore_table: gapic_v1.method.wrap_method( + self.restore_table, default_timeout=None, client_info=client_info, + ), + self.get_iam_policy: gapic_v1.method.wrap_method( + self.get_iam_policy, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.set_iam_policy: gapic_v1.method.wrap_method( + self.set_iam_policy, default_timeout=60.0, client_info=client_info, + ), + self.test_iam_permissions: gapic_v1.method.wrap_method( + self.test_iam_permissions, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + } + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_table( + self, + ) -> typing.Callable[ + [bigtable_table_admin.CreateTableRequest], + typing.Union[gba_table.Table, typing.Awaitable[gba_table.Table]], + ]: + raise NotImplementedError() + + @property + def create_table_from_snapshot( + self, + ) -> typing.Callable[ + [bigtable_table_admin.CreateTableFromSnapshotRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def list_tables( + self, + ) -> typing.Callable[ + [bigtable_table_admin.ListTablesRequest], + typing.Union[ + bigtable_table_admin.ListTablesResponse, + typing.Awaitable[bigtable_table_admin.ListTablesResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_table( + self, + ) -> typing.Callable[ + [bigtable_table_admin.GetTableRequest], + typing.Union[table.Table, typing.Awaitable[table.Table]], + ]: + raise NotImplementedError() + + @property + def delete_table( + self, + ) -> typing.Callable[ + [bigtable_table_admin.DeleteTableRequest], + typing.Union[empty.Empty, typing.Awaitable[empty.Empty]], + ]: + raise NotImplementedError() + + @property + def modify_column_families( + self, + ) -> typing.Callable[ + [bigtable_table_admin.ModifyColumnFamiliesRequest], + typing.Union[table.Table, typing.Awaitable[table.Table]], + ]: + raise NotImplementedError() + + @property + def drop_row_range( + self, + ) 
-> typing.Callable[ + [bigtable_table_admin.DropRowRangeRequest], + typing.Union[empty.Empty, typing.Awaitable[empty.Empty]], + ]: + raise NotImplementedError() + + @property + def generate_consistency_token( + self, + ) -> typing.Callable[ + [bigtable_table_admin.GenerateConsistencyTokenRequest], + typing.Union[ + bigtable_table_admin.GenerateConsistencyTokenResponse, + typing.Awaitable[bigtable_table_admin.GenerateConsistencyTokenResponse], + ], + ]: + raise NotImplementedError() + + @property + def check_consistency( + self, + ) -> typing.Callable[ + [bigtable_table_admin.CheckConsistencyRequest], + typing.Union[ + bigtable_table_admin.CheckConsistencyResponse, + typing.Awaitable[bigtable_table_admin.CheckConsistencyResponse], + ], + ]: + raise NotImplementedError() + + @property + def snapshot_table( + self, + ) -> typing.Callable[ + [bigtable_table_admin.SnapshotTableRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def get_snapshot( + self, + ) -> typing.Callable[ + [bigtable_table_admin.GetSnapshotRequest], + typing.Union[table.Snapshot, typing.Awaitable[table.Snapshot]], + ]: + raise NotImplementedError() + + @property + def list_snapshots( + self, + ) -> typing.Callable[ + [bigtable_table_admin.ListSnapshotsRequest], + typing.Union[ + bigtable_table_admin.ListSnapshotsResponse, + typing.Awaitable[bigtable_table_admin.ListSnapshotsResponse], + ], + ]: + raise NotImplementedError() + + @property + def delete_snapshot( + self, + ) -> typing.Callable[ + [bigtable_table_admin.DeleteSnapshotRequest], + typing.Union[empty.Empty, typing.Awaitable[empty.Empty]], + ]: + raise NotImplementedError() + + @property + def create_backup( + self, + ) -> typing.Callable[ + [bigtable_table_admin.CreateBackupRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def get_backup( + self, + ) -> typing.Callable[ + [bigtable_table_admin.GetBackupRequest], + typing.Union[table.Backup, typing.Awaitable[table.Backup]], + ]: + raise NotImplementedError() + + @property + def update_backup( + self, + ) -> typing.Callable[ + [bigtable_table_admin.UpdateBackupRequest], + typing.Union[table.Backup, typing.Awaitable[table.Backup]], + ]: + raise NotImplementedError() + + @property + def delete_backup( + self, + ) -> typing.Callable[ + [bigtable_table_admin.DeleteBackupRequest], + typing.Union[empty.Empty, typing.Awaitable[empty.Empty]], + ]: + raise NotImplementedError() + + @property + def list_backups( + self, + ) -> typing.Callable[ + [bigtable_table_admin.ListBackupsRequest], + typing.Union[ + bigtable_table_admin.ListBackupsResponse, + typing.Awaitable[bigtable_table_admin.ListBackupsResponse], + ], + ]: + raise NotImplementedError() + + @property + def restore_table( + self, + ) -> typing.Callable[ + [bigtable_table_admin.RestoreTableRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def get_iam_policy( + self, + ) -> typing.Callable[ + [iam_policy.GetIamPolicyRequest], + typing.Union[policy.Policy, typing.Awaitable[policy.Policy]], + ]: + raise NotImplementedError() + + @property + def set_iam_policy( + self, + ) -> typing.Callable[ + [iam_policy.SetIamPolicyRequest], + typing.Union[policy.Policy, typing.Awaitable[policy.Policy]], + ]: + raise NotImplementedError() + + @property + def test_iam_permissions( + self, + ) -> typing.Callable[ + 
[iam_policy.TestIamPermissionsRequest], + typing.Union[ + iam_policy.TestIamPermissionsResponse, + typing.Awaitable[iam_policy.TestIamPermissionsResponse], + ], + ]: + raise NotImplementedError() + + +__all__ = ("BigtableTableAdminTransport",) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py new file mode 100644 index 000000000..4f54f3a7e --- /dev/null +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py @@ -0,0 +1,944 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.bigtable_admin_v2.types import bigtable_table_admin +from google.cloud.bigtable_admin_v2.types import table +from google.cloud.bigtable_admin_v2.types import table as gba_table +from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore +from google.iam.v1 import policy_pb2 as policy # type: ignore +from google.longrunning import operations_pb2 as operations # type: ignore +from google.protobuf import empty_pb2 as empty # type: ignore + +from .base import BigtableTableAdminTransport, DEFAULT_CLIENT_INFO + + +class BigtableTableAdminGrpcTransport(BigtableTableAdminTransport): + """gRPC backend transport for BigtableTableAdmin. + + Service for creating, configuring, and deleting Cloud + Bigtable tables. + + Provides access to the table schemas only, not the data stored + within the tables. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "bigtableadmin.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. 
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+                ignored if ``channel`` is provided.
+            channel (Optional[grpc.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for the grpc channel. It is ignored if ``channel`` is provided.
+            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                A callback to provide client certificate bytes and private key bytes,
+                both in PEM format. It is used to configure a mutual TLS channel. It is
+                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+        self._ssl_channel_credentials = ssl_channel_credentials
+
+        if api_mtls_endpoint:
+            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+        if client_cert_source:
+            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+        if channel:
+            # Sanity check: Ensure that channel and credentials are not both
+            # provided.
+            credentials = False
+
+            # If a channel was explicitly provided, set it.
+            self._grpc_channel = channel
+            self._ssl_channel_credentials = None
+        elif api_mtls_endpoint:
+            host = (
+                api_mtls_endpoint
+                if ":" in api_mtls_endpoint
+                else api_mtls_endpoint + ":443"
+            )
+
+            if credentials is None:
+                credentials, _ = auth.default(
+                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+                )
+
+            # Create SSL credentials with client_cert_source or application
+            # default SSL credentials.
+            if client_cert_source:
+                cert, key = client_cert_source()
+                ssl_credentials = grpc.ssl_channel_credentials(
+                    certificate_chain=cert, private_key=key
+                )
+            else:
+                ssl_credentials = SslCredentials().ssl_credentials
+
+            # create a new channel. The provided one is ignored.
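+            # Illustrative note (not generated code): this deprecated path
+            # roughly corresponds to the newer-style arguments, e.g.
+            #
+            #   BigtableTableAdminGrpcTransport(
+            #       host="bigtableadmin.mtls.googleapis.com",
+            #       client_cert_source_for_mtls=my_cert_source,  # hypothetical callback
+            #   )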
+            self._grpc_channel = type(self).create_channel(
+                host,
+                credentials=credentials,
+                credentials_file=credentials_file,
+                ssl_credentials=ssl_credentials,
+                scopes=scopes or self.AUTH_SCOPES,
+                quota_project_id=quota_project_id,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+            self._ssl_channel_credentials = ssl_credentials
+        else:
+            host = host if ":" in host else host + ":443"
+
+            if credentials is None:
+                credentials, _ = auth.default(
+                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+                )
+
+            if client_cert_source_for_mtls and not ssl_channel_credentials:
+                cert, key = client_cert_source_for_mtls()
+                self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                    certificate_chain=cert, private_key=key
+                )
+
+            # create a new channel. The provided one is ignored.
+            self._grpc_channel = type(self).create_channel(
+                host,
+                credentials=credentials,
+                credentials_file=credentials_file,
+                ssl_credentials=self._ssl_channel_credentials,
+                scopes=scopes or self.AUTH_SCOPES,
+                quota_project_id=quota_project_id,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+
+        self._stubs = {}  # type: Dict[str, Callable]
+        self._operations_client = None
+
+        # Run the base constructor.
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes or self.AUTH_SCOPES,
+            quota_project_id=quota_project_id,
+            client_info=client_info,
+        )
+
+    @classmethod
+    def create_channel(
+        cls,
+        host: str = "bigtableadmin.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: str = None,
+        scopes: Optional[Sequence[str]] = None,
+        quota_project_id: Optional[str] = None,
+        **kwargs,
+    ) -> grpc.Channel:
+        """Create and return a gRPC channel object.
+
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+
+        Returns:
+            grpc.Channel: A gRPC channel object.
+
+        Raises:
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+        scopes = scopes or cls.AUTH_SCOPES
+        return grpc_helpers.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes,
+            quota_project_id=quota_project_id,
+            **kwargs,
+        )
+
+    @property
+    def grpc_channel(self) -> grpc.Channel:
+        """Return the channel designed to connect to this service."""
+        return self._grpc_channel
+
+    @property
+    def operations_client(self) -> operations_v1.OperationsClient:
+        """Create the client designed to process long-running operations.
+
+        This property caches on the instance; repeated calls return the same
+        client.
+ """ + # Sanity check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient(self.grpc_channel) + + # Return the client from cache. + return self._operations_client + + @property + def create_table( + self, + ) -> Callable[[bigtable_table_admin.CreateTableRequest], gba_table.Table]: + r"""Return a callable for the create table method over gRPC. + + Creates a new table in the specified instance. + The table can be created with a full set of initial + column families, specified in the request. + + Returns: + Callable[[~.CreateTableRequest], + ~.Table]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_table" not in self._stubs: + self._stubs["create_table"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable", + request_serializer=bigtable_table_admin.CreateTableRequest.serialize, + response_deserializer=gba_table.Table.deserialize, + ) + return self._stubs["create_table"] + + @property + def create_table_from_snapshot( + self, + ) -> Callable[ + [bigtable_table_admin.CreateTableFromSnapshotRequest], operations.Operation + ]: + r"""Return a callable for the create table from snapshot method over gRPC. + + Creates a new table from the specified snapshot. The + target table must not exist. The snapshot and the table + must be in the same instance. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Returns: + Callable[[~.CreateTableFromSnapshotRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_table_from_snapshot" not in self._stubs: + self._stubs["create_table_from_snapshot"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTableFromSnapshot", + request_serializer=bigtable_table_admin.CreateTableFromSnapshotRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["create_table_from_snapshot"] + + @property + def list_tables( + self, + ) -> Callable[ + [bigtable_table_admin.ListTablesRequest], + bigtable_table_admin.ListTablesResponse, + ]: + r"""Return a callable for the list tables method over gRPC. + + Lists all tables served from a specified instance. + + Returns: + Callable[[~.ListTablesRequest], + ~.ListTablesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_tables" not in self._stubs: + self._stubs["list_tables"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/ListTables", + request_serializer=bigtable_table_admin.ListTablesRequest.serialize, + response_deserializer=bigtable_table_admin.ListTablesResponse.deserialize, + ) + return self._stubs["list_tables"] + + @property + def get_table( + self, + ) -> Callable[[bigtable_table_admin.GetTableRequest], table.Table]: + r"""Return a callable for the get table method over gRPC. + + Gets metadata information about the specified table. + + Returns: + Callable[[~.GetTableRequest], + ~.Table]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_table" not in self._stubs: + self._stubs["get_table"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/GetTable", + request_serializer=bigtable_table_admin.GetTableRequest.serialize, + response_deserializer=table.Table.deserialize, + ) + return self._stubs["get_table"] + + @property + def delete_table( + self, + ) -> Callable[[bigtable_table_admin.DeleteTableRequest], empty.Empty]: + r"""Return a callable for the delete table method over gRPC. + + Permanently deletes a specified table and all of its + data. + + Returns: + Callable[[~.DeleteTableRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_table" not in self._stubs: + self._stubs["delete_table"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable", + request_serializer=bigtable_table_admin.DeleteTableRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_table"] + + @property + def modify_column_families( + self, + ) -> Callable[[bigtable_table_admin.ModifyColumnFamiliesRequest], table.Table]: + r"""Return a callable for the modify column families method over gRPC. + + Performs a series of column family modifications on + the specified table. Either all or none of the + modifications will occur before this method returns, but + data requests received prior to that point may see a + table where only some modifications have taken effect. + + Returns: + Callable[[~.ModifyColumnFamiliesRequest], + ~.Table]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "modify_column_families" not in self._stubs: + self._stubs["modify_column_families"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies", + request_serializer=bigtable_table_admin.ModifyColumnFamiliesRequest.serialize, + response_deserializer=table.Table.deserialize, + ) + return self._stubs["modify_column_families"] + + @property + def drop_row_range( + self, + ) -> Callable[[bigtable_table_admin.DropRowRangeRequest], empty.Empty]: + r"""Return a callable for the drop row range method over gRPC. 
+ + Permanently drop/delete a row range from a specified + table. The request can specify whether to delete all + rows in a table, or only those that match a particular + prefix. + + Returns: + Callable[[~.DropRowRangeRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "drop_row_range" not in self._stubs: + self._stubs["drop_row_range"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange", + request_serializer=bigtable_table_admin.DropRowRangeRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["drop_row_range"] + + @property + def generate_consistency_token( + self, + ) -> Callable[ + [bigtable_table_admin.GenerateConsistencyTokenRequest], + bigtable_table_admin.GenerateConsistencyTokenResponse, + ]: + r"""Return a callable for the generate consistency token method over gRPC. + + Generates a consistency token for a Table, which can + be used in CheckConsistency to check whether mutations + to the table that finished before this call started have + been replicated. The tokens will be available for 90 + days. + + Returns: + Callable[[~.GenerateConsistencyTokenRequest], + ~.GenerateConsistencyTokenResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "generate_consistency_token" not in self._stubs: + self._stubs["generate_consistency_token"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/GenerateConsistencyToken", + request_serializer=bigtable_table_admin.GenerateConsistencyTokenRequest.serialize, + response_deserializer=bigtable_table_admin.GenerateConsistencyTokenResponse.deserialize, + ) + return self._stubs["generate_consistency_token"] + + @property + def check_consistency( + self, + ) -> Callable[ + [bigtable_table_admin.CheckConsistencyRequest], + bigtable_table_admin.CheckConsistencyResponse, + ]: + r"""Return a callable for the check consistency method over gRPC. + + Checks replication consistency based on a consistency + token, that is, if replication has caught up based on + the conditions specified in the token and the check + request. + + Returns: + Callable[[~.CheckConsistencyRequest], + ~.CheckConsistencyResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "check_consistency" not in self._stubs: + self._stubs["check_consistency"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/CheckConsistency", + request_serializer=bigtable_table_admin.CheckConsistencyRequest.serialize, + response_deserializer=bigtable_table_admin.CheckConsistencyResponse.deserialize, + ) + return self._stubs["check_consistency"] + + @property + def snapshot_table( + self, + ) -> Callable[[bigtable_table_admin.SnapshotTableRequest], operations.Operation]: + r"""Return a callable for the snapshot table method over gRPC. 
+ + Creates a new snapshot in the specified cluster from + the specified source table. The cluster and the table + must be in the same instance. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Returns: + Callable[[~.SnapshotTableRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "snapshot_table" not in self._stubs: + self._stubs["snapshot_table"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/SnapshotTable", + request_serializer=bigtable_table_admin.SnapshotTableRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["snapshot_table"] + + @property + def get_snapshot( + self, + ) -> Callable[[bigtable_table_admin.GetSnapshotRequest], table.Snapshot]: + r"""Return a callable for the get snapshot method over gRPC. + + Gets metadata information about the specified + snapshot. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Returns: + Callable[[~.GetSnapshotRequest], + ~.Snapshot]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_snapshot" not in self._stubs: + self._stubs["get_snapshot"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/GetSnapshot", + request_serializer=bigtable_table_admin.GetSnapshotRequest.serialize, + response_deserializer=table.Snapshot.deserialize, + ) + return self._stubs["get_snapshot"] + + @property + def list_snapshots( + self, + ) -> Callable[ + [bigtable_table_admin.ListSnapshotsRequest], + bigtable_table_admin.ListSnapshotsResponse, + ]: + r"""Return a callable for the list snapshots method over gRPC. + + Lists all snapshots associated with the specified + cluster. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Returns: + Callable[[~.ListSnapshotsRequest], + ~.ListSnapshotsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
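+        # Illustrative usage sketch (hypothetical names): snapshots are
+        # listed per cluster, so the parent is a cluster path, e.g.
+        #
+        #   request = bigtable_table_admin.ListSnapshotsRequest(
+        #       parent="projects/p/instances/i/clusters/c",
+        #   )
+        #   response = transport.list_snapshots(request)
+        #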
+ if "list_snapshots" not in self._stubs: + self._stubs["list_snapshots"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/ListSnapshots", + request_serializer=bigtable_table_admin.ListSnapshotsRequest.serialize, + response_deserializer=bigtable_table_admin.ListSnapshotsResponse.deserialize, + ) + return self._stubs["list_snapshots"] + + @property + def delete_snapshot( + self, + ) -> Callable[[bigtable_table_admin.DeleteSnapshotRequest], empty.Empty]: + r"""Return a callable for the delete snapshot method over gRPC. + + Permanently deletes the specified snapshot. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Returns: + Callable[[~.DeleteSnapshotRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_snapshot" not in self._stubs: + self._stubs["delete_snapshot"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSnapshot", + request_serializer=bigtable_table_admin.DeleteSnapshotRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_snapshot"] + + @property + def create_backup( + self, + ) -> Callable[[bigtable_table_admin.CreateBackupRequest], operations.Operation]: + r"""Return a callable for the create backup method over gRPC. + + Starts creating a new Cloud Bigtable Backup. The returned backup + [long-running operation][google.longrunning.Operation] can be + used to track creation of the backup. The + [metadata][google.longrunning.Operation.metadata] field type is + [CreateBackupMetadata][google.bigtable.admin.v2.CreateBackupMetadata]. + The [response][google.longrunning.Operation.response] field type + is [Backup][google.bigtable.admin.v2.Backup], if successful. + Cancelling the returned operation will stop the creation and + delete the backup. + + Returns: + Callable[[~.CreateBackupRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_backup" not in self._stubs: + self._stubs["create_backup"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/CreateBackup", + request_serializer=bigtable_table_admin.CreateBackupRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["create_backup"] + + @property + def get_backup( + self, + ) -> Callable[[bigtable_table_admin.GetBackupRequest], table.Backup]: + r"""Return a callable for the get backup method over gRPC. + + Gets metadata on a pending or completed Cloud + Bigtable Backup. + + Returns: + Callable[[~.GetBackupRequest], + ~.Backup]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_backup" not in self._stubs: + self._stubs["get_backup"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/GetBackup", + request_serializer=bigtable_table_admin.GetBackupRequest.serialize, + response_deserializer=table.Backup.deserialize, + ) + return self._stubs["get_backup"] + + @property + def update_backup( + self, + ) -> Callable[[bigtable_table_admin.UpdateBackupRequest], table.Backup]: + r"""Return a callable for the update backup method over gRPC. + + Updates a pending or completed Cloud Bigtable Backup. + + Returns: + Callable[[~.UpdateBackupRequest], + ~.Backup]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_backup" not in self._stubs: + self._stubs["update_backup"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateBackup", + request_serializer=bigtable_table_admin.UpdateBackupRequest.serialize, + response_deserializer=table.Backup.deserialize, + ) + return self._stubs["update_backup"] + + @property + def delete_backup( + self, + ) -> Callable[[bigtable_table_admin.DeleteBackupRequest], empty.Empty]: + r"""Return a callable for the delete backup method over gRPC. + + Deletes a pending or completed Cloud Bigtable backup. + + Returns: + Callable[[~.DeleteBackupRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_backup" not in self._stubs: + self._stubs["delete_backup"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteBackup", + request_serializer=bigtable_table_admin.DeleteBackupRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_backup"] + + @property + def list_backups( + self, + ) -> Callable[ + [bigtable_table_admin.ListBackupsRequest], + bigtable_table_admin.ListBackupsResponse, + ]: + r"""Return a callable for the list backups method over gRPC. + + Lists Cloud Bigtable backups. Returns both completed + and pending backups. + + Returns: + Callable[[~.ListBackupsRequest], + ~.ListBackupsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_backups" not in self._stubs: + self._stubs["list_backups"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/ListBackups", + request_serializer=bigtable_table_admin.ListBackupsRequest.serialize, + response_deserializer=bigtable_table_admin.ListBackupsResponse.deserialize, + ) + return self._stubs["list_backups"] + + @property + def restore_table( + self, + ) -> Callable[[bigtable_table_admin.RestoreTableRequest], operations.Operation]: + r"""Return a callable for the restore table method over gRPC. + + Create a new table by restoring from a completed backup. 
The new + table must be in the same instance as the instance containing + the backup. The returned table [long-running + operation][google.longrunning.Operation] can be used to track + the progress of the operation, and to cancel it. The + [metadata][google.longrunning.Operation.metadata] field type is + [RestoreTableMetadata][google.bigtable.admin.v2.RestoreTableMetadata]. + The [response][google.longrunning.Operation.response] type is + [Table][google.bigtable.admin.v2.Table], if successful. + + Returns: + Callable[[~.RestoreTableRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "restore_table" not in self._stubs: + self._stubs["restore_table"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/RestoreTable", + request_serializer=bigtable_table_admin.RestoreTableRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["restore_table"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy.GetIamPolicyRequest], policy.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + + Gets the access control policy for a Table or Backup + resource. Returns an empty policy if the resource exists + but does not have a policy set. + + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/GetIamPolicy", + request_serializer=iam_policy.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy.SetIamPolicyRequest], policy.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + + Sets the access control policy on a Table or Backup + resource. Replaces any existing policy. + + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/SetIamPolicy", + request_serializer=iam_policy.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy.TestIamPermissionsRequest], iam_policy.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + + Returns permissions that the caller has on the + specified Table or Backup resource.
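+        For example, a caller might probe permissions such as
+        ``bigtable.tables.get`` (an illustrative permission name, not
+        defined in this file).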
+ + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/TestIamPermissions", + request_serializer=iam_policy.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ("BigtableTableAdminGrpcTransport",) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py new file mode 100644 index 000000000..8e9197468 --- /dev/null +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py @@ -0,0 +1,962 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers_async # type: ignore +from google.api_core import operations_v1 # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.bigtable_admin_v2.types import bigtable_table_admin +from google.cloud.bigtable_admin_v2.types import table +from google.cloud.bigtable_admin_v2.types import table as gba_table +from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore +from google.iam.v1 import policy_pb2 as policy # type: ignore +from google.longrunning import operations_pb2 as operations # type: ignore +from google.protobuf import empty_pb2 as empty # type: ignore + +from .base import BigtableTableAdminTransport, DEFAULT_CLIENT_INFO +from .grpc import BigtableTableAdminGrpcTransport + + +class BigtableTableAdminGrpcAsyncIOTransport(BigtableTableAdminTransport): + """gRPC AsyncIO backend transport for BigtableTableAdmin. + + Service for creating, configuring, and deleting Cloud + Bigtable tables. + + Provides access to the table schemas only, not the data stored + within the tables. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "bigtableadmin.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + address (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "bigtableadmin.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. 
It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._ssl_channel_credentials = ssl_channel_credentials + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Sanity check: Ensure that channel and credentials are not both + # provided. + credentials = False + + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + elif api_mtls_endpoint: + host = ( + api_mtls_endpoint + if ":" in api_mtls_endpoint + else api_mtls_endpoint + ":443" + ) + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + ssl_credentials = SslCredentials().ssl_credentials + + # Create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + self._ssl_channel_credentials = ssl_credentials + else: + host = host if ":" in host else host + ":443" + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # Create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=self._ssl_channel_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Run the base constructor.
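+        # Illustrative construction sketch (hypothetical values; when no
+        # credentials are supplied, application default credentials apply):
+        #
+        #   transport = BigtableTableAdminGrpcAsyncIOTransport(
+        #       host="bigtableadmin.googleapis.com",
+        #       quota_project_id="my-billing-project",
+        #   )
+        #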
+ super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + client_info=client_info, + ) + + self._stubs = {} + self._operations_client = None + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_table( + self, + ) -> Callable[ + [bigtable_table_admin.CreateTableRequest], Awaitable[gba_table.Table] + ]: + r"""Return a callable for the create table method over gRPC. + + Creates a new table in the specified instance. + The table can be created with a full set of initial + column families, specified in the request. + + Returns: + Callable[[~.CreateTableRequest], + Awaitable[~.Table]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_table" not in self._stubs: + self._stubs["create_table"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable", + request_serializer=bigtable_table_admin.CreateTableRequest.serialize, + response_deserializer=gba_table.Table.deserialize, + ) + return self._stubs["create_table"] + + @property + def create_table_from_snapshot( + self, + ) -> Callable[ + [bigtable_table_admin.CreateTableFromSnapshotRequest], + Awaitable[operations.Operation], + ]: + r"""Return a callable for the create table from snapshot method over gRPC. + + Creates a new table from the specified snapshot. The + target table must not exist. The snapshot and the table + must be in the same instance. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Returns: + Callable[[~.CreateTableFromSnapshotRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
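+        # Illustrative sketch (hypothetical names): awaiting the callable
+        # yields a raw operations_pb2.Operation; further polling can go
+        # through ``operations_client``, e.g.
+        #
+        #   op = await transport.create_table_from_snapshot(request)
+        #   latest = await transport.operations_client.get_operation(op.name)
+        #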
+ if "create_table_from_snapshot" not in self._stubs: + self._stubs["create_table_from_snapshot"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTableFromSnapshot", + request_serializer=bigtable_table_admin.CreateTableFromSnapshotRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["create_table_from_snapshot"] + + @property + def list_tables( + self, + ) -> Callable[ + [bigtable_table_admin.ListTablesRequest], + Awaitable[bigtable_table_admin.ListTablesResponse], + ]: + r"""Return a callable for the list tables method over gRPC. + + Lists all tables served from a specified instance. + + Returns: + Callable[[~.ListTablesRequest], + Awaitable[~.ListTablesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_tables" not in self._stubs: + self._stubs["list_tables"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/ListTables", + request_serializer=bigtable_table_admin.ListTablesRequest.serialize, + response_deserializer=bigtable_table_admin.ListTablesResponse.deserialize, + ) + return self._stubs["list_tables"] + + @property + def get_table( + self, + ) -> Callable[[bigtable_table_admin.GetTableRequest], Awaitable[table.Table]]: + r"""Return a callable for the get table method over gRPC. + + Gets metadata information about the specified table. + + Returns: + Callable[[~.GetTableRequest], + Awaitable[~.Table]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_table" not in self._stubs: + self._stubs["get_table"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/GetTable", + request_serializer=bigtable_table_admin.GetTableRequest.serialize, + response_deserializer=table.Table.deserialize, + ) + return self._stubs["get_table"] + + @property + def delete_table( + self, + ) -> Callable[[bigtable_table_admin.DeleteTableRequest], Awaitable[empty.Empty]]: + r"""Return a callable for the delete table method over gRPC. + + Permanently deletes a specified table and all of its + data. + + Returns: + Callable[[~.DeleteTableRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_table" not in self._stubs: + self._stubs["delete_table"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable", + request_serializer=bigtable_table_admin.DeleteTableRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_table"] + + @property + def modify_column_families( + self, + ) -> Callable[ + [bigtable_table_admin.ModifyColumnFamiliesRequest], Awaitable[table.Table] + ]: + r"""Return a callable for the modify column families method over gRPC. + + Performs a series of column family modifications on + the specified table. 
Either all or none of the + modifications will occur before this method returns, but + data requests received prior to that point may see a + table where only some modifications have taken effect. + + Returns: + Callable[[~.ModifyColumnFamiliesRequest], + Awaitable[~.Table]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "modify_column_families" not in self._stubs: + self._stubs["modify_column_families"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies", + request_serializer=bigtable_table_admin.ModifyColumnFamiliesRequest.serialize, + response_deserializer=table.Table.deserialize, + ) + return self._stubs["modify_column_families"] + + @property + def drop_row_range( + self, + ) -> Callable[[bigtable_table_admin.DropRowRangeRequest], Awaitable[empty.Empty]]: + r"""Return a callable for the drop row range method over gRPC. + + Permanently drop/delete a row range from a specified + table. The request can specify whether to delete all + rows in a table, or only those that match a particular + prefix. + + Returns: + Callable[[~.DropRowRangeRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "drop_row_range" not in self._stubs: + self._stubs["drop_row_range"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange", + request_serializer=bigtable_table_admin.DropRowRangeRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["drop_row_range"] + + @property + def generate_consistency_token( + self, + ) -> Callable[ + [bigtable_table_admin.GenerateConsistencyTokenRequest], + Awaitable[bigtable_table_admin.GenerateConsistencyTokenResponse], + ]: + r"""Return a callable for the generate consistency token method over gRPC. + + Generates a consistency token for a Table, which can + be used in CheckConsistency to check whether mutations + to the table that finished before this call started have + been replicated. The tokens will be available for 90 + days. + + Returns: + Callable[[~.GenerateConsistencyTokenRequest], + Awaitable[~.GenerateConsistencyTokenResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
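+        # Illustrative workflow sketch (hypothetical names): pair this stub
+        # with ``check_consistency``, e.g.
+        #
+        #   token = await transport.generate_consistency_token(
+        #       bigtable_table_admin.GenerateConsistencyTokenRequest(name=table_name)
+        #   )
+        #   check = await transport.check_consistency(
+        #       bigtable_table_admin.CheckConsistencyRequest(
+        #           name=table_name,
+        #           consistency_token=token.consistency_token,
+        #       )
+        #   )
+        #   # check.consistent is True once replication has caught up.
+        #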
+ if "generate_consistency_token" not in self._stubs: + self._stubs["generate_consistency_token"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/GenerateConsistencyToken", + request_serializer=bigtable_table_admin.GenerateConsistencyTokenRequest.serialize, + response_deserializer=bigtable_table_admin.GenerateConsistencyTokenResponse.deserialize, + ) + return self._stubs["generate_consistency_token"] + + @property + def check_consistency( + self, + ) -> Callable[ + [bigtable_table_admin.CheckConsistencyRequest], + Awaitable[bigtable_table_admin.CheckConsistencyResponse], + ]: + r"""Return a callable for the check consistency method over gRPC. + + Checks replication consistency based on a consistency + token, that is, if replication has caught up based on + the conditions specified in the token and the check + request. + + Returns: + Callable[[~.CheckConsistencyRequest], + Awaitable[~.CheckConsistencyResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "check_consistency" not in self._stubs: + self._stubs["check_consistency"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/CheckConsistency", + request_serializer=bigtable_table_admin.CheckConsistencyRequest.serialize, + response_deserializer=bigtable_table_admin.CheckConsistencyResponse.deserialize, + ) + return self._stubs["check_consistency"] + + @property + def snapshot_table( + self, + ) -> Callable[ + [bigtable_table_admin.SnapshotTableRequest], Awaitable[operations.Operation] + ]: + r"""Return a callable for the snapshot table method over gRPC. + + Creates a new snapshot in the specified cluster from + the specified source table. The cluster and the table + must be in the same instance. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Returns: + Callable[[~.SnapshotTableRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "snapshot_table" not in self._stubs: + self._stubs["snapshot_table"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/SnapshotTable", + request_serializer=bigtable_table_admin.SnapshotTableRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["snapshot_table"] + + @property + def get_snapshot( + self, + ) -> Callable[[bigtable_table_admin.GetSnapshotRequest], Awaitable[table.Snapshot]]: + r"""Return a callable for the get snapshot method over gRPC. + + Gets metadata information about the specified + snapshot. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. 
+ + Returns: + Callable[[~.GetSnapshotRequest], + Awaitable[~.Snapshot]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_snapshot" not in self._stubs: + self._stubs["get_snapshot"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/GetSnapshot", + request_serializer=bigtable_table_admin.GetSnapshotRequest.serialize, + response_deserializer=table.Snapshot.deserialize, + ) + return self._stubs["get_snapshot"] + + @property + def list_snapshots( + self, + ) -> Callable[ + [bigtable_table_admin.ListSnapshotsRequest], + Awaitable[bigtable_table_admin.ListSnapshotsResponse], + ]: + r"""Return a callable for the list snapshots method over gRPC. + + Lists all snapshots associated with the specified + cluster. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Returns: + Callable[[~.ListSnapshotsRequest], + Awaitable[~.ListSnapshotsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_snapshots" not in self._stubs: + self._stubs["list_snapshots"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/ListSnapshots", + request_serializer=bigtable_table_admin.ListSnapshotsRequest.serialize, + response_deserializer=bigtable_table_admin.ListSnapshotsResponse.deserialize, + ) + return self._stubs["list_snapshots"] + + @property + def delete_snapshot( + self, + ) -> Callable[[bigtable_table_admin.DeleteSnapshotRequest], Awaitable[empty.Empty]]: + r"""Return a callable for the delete snapshot method over gRPC. + + Permanently deletes the specified snapshot. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Returns: + Callable[[~.DeleteSnapshotRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_snapshot" not in self._stubs: + self._stubs["delete_snapshot"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSnapshot", + request_serializer=bigtable_table_admin.DeleteSnapshotRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_snapshot"] + + @property + def create_backup( + self, + ) -> Callable[ + [bigtable_table_admin.CreateBackupRequest], Awaitable[operations.Operation] + ]: + r"""Return a callable for the create backup method over gRPC. + + Starts creating a new Cloud Bigtable Backup. 
The returned backup + [long-running operation][google.longrunning.Operation] can be + used to track creation of the backup. The + [metadata][google.longrunning.Operation.metadata] field type is + [CreateBackupMetadata][google.bigtable.admin.v2.CreateBackupMetadata]. + The [response][google.longrunning.Operation.response] field type + is [Backup][google.bigtable.admin.v2.Backup], if successful. + Cancelling the returned operation will stop the creation and + delete the backup. + + Returns: + Callable[[~.CreateBackupRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_backup" not in self._stubs: + self._stubs["create_backup"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/CreateBackup", + request_serializer=bigtable_table_admin.CreateBackupRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["create_backup"] + + @property + def get_backup( + self, + ) -> Callable[[bigtable_table_admin.GetBackupRequest], Awaitable[table.Backup]]: + r"""Return a callable for the get backup method over gRPC. + + Gets metadata on a pending or completed Cloud + Bigtable Backup. + + Returns: + Callable[[~.GetBackupRequest], + Awaitable[~.Backup]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_backup" not in self._stubs: + self._stubs["get_backup"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/GetBackup", + request_serializer=bigtable_table_admin.GetBackupRequest.serialize, + response_deserializer=table.Backup.deserialize, + ) + return self._stubs["get_backup"] + + @property + def update_backup( + self, + ) -> Callable[[bigtable_table_admin.UpdateBackupRequest], Awaitable[table.Backup]]: + r"""Return a callable for the update backup method over gRPC. + + Updates a pending or completed Cloud Bigtable Backup. + + Returns: + Callable[[~.UpdateBackupRequest], + Awaitable[~.Backup]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_backup" not in self._stubs: + self._stubs["update_backup"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateBackup", + request_serializer=bigtable_table_admin.UpdateBackupRequest.serialize, + response_deserializer=table.Backup.deserialize, + ) + return self._stubs["update_backup"] + + @property + def delete_backup( + self, + ) -> Callable[[bigtable_table_admin.DeleteBackupRequest], Awaitable[empty.Empty]]: + r"""Return a callable for the delete backup method over gRPC. + + Deletes a pending or completed Cloud Bigtable backup. + + Returns: + Callable[[~.DeleteBackupRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
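+        # (Illustrative, hypothetical name: awaiting the callable with a
+        # DeleteBackupRequest returns ``empty.Empty`` on success.)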
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_backup" not in self._stubs: + self._stubs["delete_backup"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteBackup", + request_serializer=bigtable_table_admin.DeleteBackupRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_backup"] + + @property + def list_backups( + self, + ) -> Callable[ + [bigtable_table_admin.ListBackupsRequest], + Awaitable[bigtable_table_admin.ListBackupsResponse], + ]: + r"""Return a callable for the list backups method over gRPC. + + Lists Cloud Bigtable backups. Returns both completed + and pending backups. + + Returns: + Callable[[~.ListBackupsRequest], + Awaitable[~.ListBackupsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_backups" not in self._stubs: + self._stubs["list_backups"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/ListBackups", + request_serializer=bigtable_table_admin.ListBackupsRequest.serialize, + response_deserializer=bigtable_table_admin.ListBackupsResponse.deserialize, + ) + return self._stubs["list_backups"] + + @property + def restore_table( + self, + ) -> Callable[ + [bigtable_table_admin.RestoreTableRequest], Awaitable[operations.Operation] + ]: + r"""Return a callable for the restore table method over gRPC. + + Create a new table by restoring from a completed backup. The new + table must be in the same instance as the instance containing + the backup. The returned table [long-running + operation][google.longrunning.Operation] can be used to track + the progress of the operation, and to cancel it. The + [metadata][google.longrunning.Operation.metadata] field type is + [RestoreTableMetadata][google.bigtable.admin.v2.RestoreTableMetadata]. + The [response][google.longrunning.Operation.response] type is + [Table][google.bigtable.admin.v2.Table], if successful. + + Returns: + Callable[[~.RestoreTableRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "restore_table" not in self._stubs: + self._stubs["restore_table"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/RestoreTable", + request_serializer=bigtable_table_admin.RestoreTableRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["restore_table"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy.GetIamPolicyRequest], Awaitable[policy.Policy]]: + r"""Return a callable for the get iam policy method over gRPC. + + Gets the access control policy for a Table or Backup + resource. Returns an empty policy if the resource exists + but does not have a policy set. + + Returns: + Callable[[~.GetIamPolicyRequest], + Awaitable[~.Policy]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request.
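+        # (Illustrative, hypothetical name: the IAM stubs use the shared
+        # google.iam.v1 protos, e.g.
+        #
+        #   policy = await transport.get_iam_policy(
+        #       iam_policy.GetIamPolicyRequest(resource=table_name)
+        #   )
+        #
+        # where ``resource`` is a fully-qualified table or backup name.)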
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/GetIamPolicy", + request_serializer=iam_policy.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy.SetIamPolicyRequest], Awaitable[policy.Policy]]: + r"""Return a callable for the set iam policy method over gRPC. + + Sets the access control policy on a Table or Backup + resource. Replaces any existing policy. + + Returns: + Callable[[~.SetIamPolicyRequest], + Awaitable[~.Policy]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/SetIamPolicy", + request_serializer=iam_policy.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy.TestIamPermissionsRequest], + Awaitable[iam_policy.TestIamPermissionsResponse], + ]: + r"""Return a callable for the test iam permissions method over gRPC. + + Returns permissions that the caller has on the + specified Table or Backup resource. + + Returns: + Callable[[~.TestIamPermissionsRequest], + Awaitable[~.TestIamPermissionsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/TestIamPermissions", + request_serializer=iam_policy.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ("BigtableTableAdminGrpcAsyncIOTransport",) diff --git a/google/cloud/bigtable_admin_v2/types.py b/google/cloud/bigtable_admin_v2/types.py deleted file mode 100644 index 7dbb939d1..000000000 --- a/google/cloud/bigtable_admin_v2/types.py +++ /dev/null @@ -1,76 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -from __future__ import absolute_import -import sys - -from google.api_core.protobuf_helpers import get_messages - -from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2 -from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2 -from google.cloud.bigtable_admin_v2.proto import common_pb2 -from google.cloud.bigtable_admin_v2.proto import instance_pb2 -from google.cloud.bigtable_admin_v2.proto import table_pb2 -from google.iam.v1 import iam_policy_pb2 -from google.iam.v1 import options_pb2 -from google.iam.v1 import policy_pb2 -from google.longrunning import operations_pb2 -from google.protobuf import any_pb2 -from google.protobuf import duration_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 -from google.protobuf import timestamp_pb2 -from google.rpc import status_pb2 -from google.type import expr_pb2 - - -_shared_modules = [ - iam_policy_pb2, - options_pb2, - policy_pb2, - operations_pb2, - any_pb2, - duration_pb2, - empty_pb2, - field_mask_pb2, - timestamp_pb2, - status_pb2, - expr_pb2, -] - -_local_modules = [ - bigtable_instance_admin_pb2, - bigtable_table_admin_pb2, - common_pb2, - instance_pb2, - table_pb2, -] - -names = [] - -for module in _shared_modules: # pragma: NO COVER - for name, message in get_messages(module).items(): - setattr(sys.modules[__name__], name, message) - names.append(name) -for module in _local_modules: - for name, message in get_messages(module).items(): - message.__module__ = "google.cloud.bigtable_admin_v2.types" - setattr(sys.modules[__name__], name, message) - names.append(name) - - -__all__ = tuple(sorted(names)) diff --git a/google/cloud/bigtable_admin_v2/types/__init__.py b/google/cloud/bigtable_admin_v2/types/__init__.py new file mode 100644 index 000000000..26c4b40c9 --- /dev/null +++ b/google/cloud/bigtable_admin_v2/types/__init__.py @@ -0,0 +1,158 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
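+#
+# Usage sketch (illustrative values only): the names re-exported below are
+# proto-plus messages, so they accept keyword arguments on construction:
+#
+#   from google.cloud.bigtable_admin_v2 import types
+#   req = types.GetInstanceRequest(name="projects/p/instances/i")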
+# + +from .common import ( + OperationProgress, + StorageType, +) +from .instance import ( + Instance, + Cluster, + AppProfile, +) +from .bigtable_instance_admin import ( + CreateInstanceRequest, + GetInstanceRequest, + ListInstancesRequest, + ListInstancesResponse, + PartialUpdateInstanceRequest, + DeleteInstanceRequest, + CreateClusterRequest, + GetClusterRequest, + ListClustersRequest, + ListClustersResponse, + DeleteClusterRequest, + CreateInstanceMetadata, + UpdateInstanceMetadata, + CreateClusterMetadata, + UpdateClusterMetadata, + CreateAppProfileRequest, + GetAppProfileRequest, + ListAppProfilesRequest, + ListAppProfilesResponse, + UpdateAppProfileRequest, + DeleteAppProfileRequest, + UpdateAppProfileMetadata, +) +from .table import ( + RestoreInfo, + Table, + ColumnFamily, + GcRule, + Snapshot, + Backup, + BackupInfo, + RestoreSourceType, +) +from .bigtable_table_admin import ( + RestoreTableRequest, + RestoreTableMetadata, + OptimizeRestoredTableMetadata, + CreateTableRequest, + CreateTableFromSnapshotRequest, + DropRowRangeRequest, + ListTablesRequest, + ListTablesResponse, + GetTableRequest, + DeleteTableRequest, + ModifyColumnFamiliesRequest, + GenerateConsistencyTokenRequest, + GenerateConsistencyTokenResponse, + CheckConsistencyRequest, + CheckConsistencyResponse, + SnapshotTableRequest, + GetSnapshotRequest, + ListSnapshotsRequest, + ListSnapshotsResponse, + DeleteSnapshotRequest, + SnapshotTableMetadata, + CreateTableFromSnapshotMetadata, + CreateBackupRequest, + CreateBackupMetadata, + UpdateBackupRequest, + GetBackupRequest, + DeleteBackupRequest, + ListBackupsRequest, + ListBackupsResponse, +) + +__all__ = ( + "OperationProgress", + "StorageType", + "Instance", + "Cluster", + "AppProfile", + "CreateInstanceRequest", + "GetInstanceRequest", + "ListInstancesRequest", + "ListInstancesResponse", + "PartialUpdateInstanceRequest", + "DeleteInstanceRequest", + "CreateClusterRequest", + "GetClusterRequest", + "ListClustersRequest", + "ListClustersResponse", + "DeleteClusterRequest", + "CreateInstanceMetadata", + "UpdateInstanceMetadata", + "CreateClusterMetadata", + "UpdateClusterMetadata", + "CreateAppProfileRequest", + "GetAppProfileRequest", + "ListAppProfilesRequest", + "ListAppProfilesResponse", + "UpdateAppProfileRequest", + "DeleteAppProfileRequest", + "UpdateAppProfileMetadata", + "RestoreInfo", + "Table", + "ColumnFamily", + "GcRule", + "Snapshot", + "Backup", + "BackupInfo", + "RestoreSourceType", + "RestoreTableRequest", + "RestoreTableMetadata", + "OptimizeRestoredTableMetadata", + "CreateTableRequest", + "CreateTableFromSnapshotRequest", + "DropRowRangeRequest", + "ListTablesRequest", + "ListTablesResponse", + "GetTableRequest", + "DeleteTableRequest", + "ModifyColumnFamiliesRequest", + "GenerateConsistencyTokenRequest", + "GenerateConsistencyTokenResponse", + "CheckConsistencyRequest", + "CheckConsistencyResponse", + "SnapshotTableRequest", + "GetSnapshotRequest", + "ListSnapshotsRequest", + "ListSnapshotsResponse", + "DeleteSnapshotRequest", + "SnapshotTableMetadata", + "CreateTableFromSnapshotMetadata", + "CreateBackupRequest", + "CreateBackupMetadata", + "UpdateBackupRequest", + "GetBackupRequest", + "DeleteBackupRequest", + "ListBackupsRequest", + "ListBackupsResponse", +) diff --git a/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py b/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py new file mode 100644 index 000000000..38ae3eab6 --- /dev/null +++ b/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py @@ -0,0 +1,530 
@@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.bigtable_admin_v2.types import instance as gba_instance +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +__protobuf__ = proto.module( + package="google.bigtable.admin.v2", + manifest={ + "CreateInstanceRequest", + "GetInstanceRequest", + "ListInstancesRequest", + "ListInstancesResponse", + "PartialUpdateInstanceRequest", + "DeleteInstanceRequest", + "CreateClusterRequest", + "GetClusterRequest", + "ListClustersRequest", + "ListClustersResponse", + "DeleteClusterRequest", + "CreateInstanceMetadata", + "UpdateInstanceMetadata", + "CreateClusterMetadata", + "UpdateClusterMetadata", + "CreateAppProfileRequest", + "GetAppProfileRequest", + "ListAppProfilesRequest", + "ListAppProfilesResponse", + "UpdateAppProfileRequest", + "DeleteAppProfileRequest", + "UpdateAppProfileMetadata", + }, +) + + +class CreateInstanceRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.CreateInstance. + + Attributes: + parent (str): + Required. The unique name of the project in which to create + the new instance. Values are of the form + ``projects/{project}``. + instance_id (str): + Required. The ID to be used when referring to the new + instance within its project, e.g., just ``myinstance`` + rather than ``projects/myproject/instances/myinstance``. + instance (google.cloud.bigtable_admin_v2.types.Instance): + Required. The instance to create. Fields marked + ``OutputOnly`` must be left blank. + clusters (Sequence[google.cloud.bigtable_admin_v2.types.CreateInstanceRequest.ClustersEntry]): + Required. The clusters to be created within the instance, + mapped by desired cluster ID, e.g., just ``mycluster`` + rather than + ``projects/myproject/instances/myinstance/clusters/mycluster``. + Fields marked ``OutputOnly`` must be left blank. Currently, + at most four clusters can be specified. + """ + + parent = proto.Field(proto.STRING, number=1) + + instance_id = proto.Field(proto.STRING, number=2) + + instance = proto.Field(proto.MESSAGE, number=3, message=gba_instance.Instance,) + + clusters = proto.MapField( + proto.STRING, proto.MESSAGE, number=4, message=gba_instance.Cluster, + ) + + +class GetInstanceRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.GetInstance. + + Attributes: + name (str): + Required. The unique name of the requested instance. Values + are of the form ``projects/{project}/instances/{instance}``. + """ + + name = proto.Field(proto.STRING, number=1) + + +class ListInstancesRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.ListInstances. + + Attributes: + parent (str): + Required. The unique name of the project for which a list of + instances is requested. Values are of the form + ``projects/{project}``. + page_token (str): + DEPRECATED: This field is unused and ignored. 
+ """ + + parent = proto.Field(proto.STRING, number=1) + + page_token = proto.Field(proto.STRING, number=2) + + +class ListInstancesResponse(proto.Message): + r"""Response message for BigtableInstanceAdmin.ListInstances. + + Attributes: + instances (Sequence[google.cloud.bigtable_admin_v2.types.Instance]): + The list of requested instances. + failed_locations (Sequence[str]): + Locations from which Instance information could not be + retrieved, due to an outage or some other transient + condition. Instances whose Clusters are all in one of the + failed locations may be missing from ``instances``, and + Instances with at least one Cluster in a failed location may + only have partial information returned. Values are of the + form ``projects//locations/`` + next_page_token (str): + DEPRECATED: This field is unused and ignored. + """ + + @property + def raw_page(self): + return self + + instances = proto.RepeatedField( + proto.MESSAGE, number=1, message=gba_instance.Instance, + ) + + failed_locations = proto.RepeatedField(proto.STRING, number=2) + + next_page_token = proto.Field(proto.STRING, number=3) + + +class PartialUpdateInstanceRequest(proto.Message): + r"""Request message for + BigtableInstanceAdmin.PartialUpdateInstance. + + Attributes: + instance (google.cloud.bigtable_admin_v2.types.Instance): + Required. The Instance which will (partially) + replace the current value. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The subset of Instance fields which + should be replaced. Must be explicitly set. + """ + + instance = proto.Field(proto.MESSAGE, number=1, message=gba_instance.Instance,) + + update_mask = proto.Field(proto.MESSAGE, number=2, message=field_mask.FieldMask,) + + +class DeleteInstanceRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.DeleteInstance. + + Attributes: + name (str): + Required. The unique name of the instance to be deleted. + Values are of the form + ``projects/{project}/instances/{instance}``. + """ + + name = proto.Field(proto.STRING, number=1) + + +class CreateClusterRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.CreateCluster. + + Attributes: + parent (str): + Required. The unique name of the instance in which to create + the new cluster. Values are of the form + ``projects/{project}/instances/{instance}``. + cluster_id (str): + Required. The ID to be used when referring to the new + cluster within its instance, e.g., just ``mycluster`` rather + than + ``projects/myproject/instances/myinstance/clusters/mycluster``. + cluster (google.cloud.bigtable_admin_v2.types.Cluster): + Required. The cluster to be created. Fields marked + ``OutputOnly`` must be left blank. + """ + + parent = proto.Field(proto.STRING, number=1) + + cluster_id = proto.Field(proto.STRING, number=2) + + cluster = proto.Field(proto.MESSAGE, number=3, message=gba_instance.Cluster,) + + +class GetClusterRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.GetCluster. + + Attributes: + name (str): + Required. The unique name of the requested cluster. Values + are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + """ + + name = proto.Field(proto.STRING, number=1) + + +class ListClustersRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.ListClusters. + + Attributes: + parent (str): + Required. The unique name of the instance for which a list + of clusters is requested. Values are of the form + ``projects/{project}/instances/{instance}``. 
Use + ``{instance} = '-'`` to list Clusters for all Instances in a + project, e.g., ``projects/myproject/instances/-``. + page_token (str): + DEPRECATED: This field is unused and ignored. + """ + + parent = proto.Field(proto.STRING, number=1) + + page_token = proto.Field(proto.STRING, number=2) + + +class ListClustersResponse(proto.Message): + r"""Response message for BigtableInstanceAdmin.ListClusters. + + Attributes: + clusters (Sequence[google.cloud.bigtable_admin_v2.types.Cluster]): + The list of requested clusters. + failed_locations (Sequence[str]): + Locations from which Cluster information could not be + retrieved, due to an outage or some other transient + condition. Clusters from these locations may be missing from + ``clusters``, or may only have partial information returned. + Values are of the form + ``projects//locations/`` + next_page_token (str): + DEPRECATED: This field is unused and ignored. + """ + + @property + def raw_page(self): + return self + + clusters = proto.RepeatedField( + proto.MESSAGE, number=1, message=gba_instance.Cluster, + ) + + failed_locations = proto.RepeatedField(proto.STRING, number=2) + + next_page_token = proto.Field(proto.STRING, number=3) + + +class DeleteClusterRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.DeleteCluster. + + Attributes: + name (str): + Required. The unique name of the cluster to be deleted. + Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + """ + + name = proto.Field(proto.STRING, number=1) + + +class CreateInstanceMetadata(proto.Message): + r"""The metadata for the Operation returned by CreateInstance. + + Attributes: + original_request (google.cloud.bigtable_admin_v2.types.CreateInstanceRequest): + The request that prompted the initiation of + this CreateInstance operation. + request_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the original request was + received. + finish_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the operation failed or was + completed successfully. + """ + + original_request = proto.Field( + proto.MESSAGE, number=1, message="CreateInstanceRequest", + ) + + request_time = proto.Field(proto.MESSAGE, number=2, message=timestamp.Timestamp,) + + finish_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,) + + +class UpdateInstanceMetadata(proto.Message): + r"""The metadata for the Operation returned by UpdateInstance. + + Attributes: + original_request (google.cloud.bigtable_admin_v2.types.PartialUpdateInstanceRequest): + The request that prompted the initiation of + this UpdateInstance operation. + request_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the original request was + received. + finish_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the operation failed or was + completed successfully. + """ + + original_request = proto.Field( + proto.MESSAGE, number=1, message="PartialUpdateInstanceRequest", + ) + + request_time = proto.Field(proto.MESSAGE, number=2, message=timestamp.Timestamp,) + + finish_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,) + + +class CreateClusterMetadata(proto.Message): + r"""The metadata for the Operation returned by CreateCluster. + + Attributes: + original_request (google.cloud.bigtable_admin_v2.types.CreateClusterRequest): + The request that prompted the initiation of + this CreateCluster operation. 
+ request_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the original request was + received. + finish_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the operation failed or was + completed successfully. + """ + + original_request = proto.Field( + proto.MESSAGE, number=1, message="CreateClusterRequest", + ) + + request_time = proto.Field(proto.MESSAGE, number=2, message=timestamp.Timestamp,) + + finish_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,) + + +class UpdateClusterMetadata(proto.Message): + r"""The metadata for the Operation returned by UpdateCluster. + + Attributes: + original_request (google.cloud.bigtable_admin_v2.types.Cluster): + The request that prompted the initiation of + this UpdateCluster operation. + request_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the original request was + received. + finish_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the operation failed or was + completed successfully. + """ + + original_request = proto.Field( + proto.MESSAGE, number=1, message=gba_instance.Cluster, + ) + + request_time = proto.Field(proto.MESSAGE, number=2, message=timestamp.Timestamp,) + + finish_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,) + + +class CreateAppProfileRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.CreateAppProfile. + + Attributes: + parent (str): + Required. The unique name of the instance in which to create + the new app profile. Values are of the form + ``projects/{project}/instances/{instance}``. + app_profile_id (str): + Required. The ID to be used when referring to the new app + profile within its instance, e.g., just ``myprofile`` rather + than + ``projects/myproject/instances/myinstance/appProfiles/myprofile``. + app_profile (google.cloud.bigtable_admin_v2.types.AppProfile): + Required. The app profile to be created. Fields marked + ``OutputOnly`` will be ignored. + ignore_warnings (bool): + If true, ignore safety checks when creating + the app profile. + """ + + parent = proto.Field(proto.STRING, number=1) + + app_profile_id = proto.Field(proto.STRING, number=2) + + app_profile = proto.Field(proto.MESSAGE, number=3, message=gba_instance.AppProfile,) + + ignore_warnings = proto.Field(proto.BOOL, number=4) + + +class GetAppProfileRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.GetAppProfile. + + Attributes: + name (str): + Required. The unique name of the requested app profile. + Values are of the form + ``projects/{project}/instances/{instance}/appProfiles/{app_profile}``. + """ + + name = proto.Field(proto.STRING, number=1) + + +class ListAppProfilesRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.ListAppProfiles. + + Attributes: + parent (str): + Required. The unique name of the instance for which a list + of app profiles is requested. Values are of the form + ``projects/{project}/instances/{instance}``. Use + ``{instance} = '-'`` to list AppProfiles for all Instances + in a project, e.g., ``projects/myproject/instances/-``. + page_size (int): + Maximum number of results per page. + + A page_size of zero lets the server choose the number of + items to return. A page_size which is strictly positive will + return at most that many items. A negative page_size will + cause an error. + + Following the first request, subsequent paginated calls are + not required to pass a page_size. 
If a page_size is set in + subsequent calls, it must match the page_size given in the + first request. + page_token (str): + The value of ``next_page_token`` returned by a previous + call. + """ + + parent = proto.Field(proto.STRING, number=1) + + page_size = proto.Field(proto.INT32, number=3) + + page_token = proto.Field(proto.STRING, number=2) + + +class ListAppProfilesResponse(proto.Message): + r"""Response message for BigtableInstanceAdmin.ListAppProfiles. + + Attributes: + app_profiles (Sequence[google.cloud.bigtable_admin_v2.types.AppProfile]): + The list of requested app profiles. + next_page_token (str): + Set if not all app profiles could be returned in a single + response. Pass this value to ``page_token`` in another + request to get the next page of results. + failed_locations (Sequence[str]): + Locations from which AppProfile information could not be + retrieved, due to an outage or some other transient + condition. AppProfiles from these locations may be missing + from ``app_profiles``. Values are of the form + ``projects//locations/`` + """ + + @property + def raw_page(self): + return self + + app_profiles = proto.RepeatedField( + proto.MESSAGE, number=1, message=gba_instance.AppProfile, + ) + + next_page_token = proto.Field(proto.STRING, number=2) + + failed_locations = proto.RepeatedField(proto.STRING, number=3) + + +class UpdateAppProfileRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.UpdateAppProfile. + + Attributes: + app_profile (google.cloud.bigtable_admin_v2.types.AppProfile): + Required. The app profile which will + (partially) replace the current value. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The subset of app profile fields + which should be replaced. If unset, all fields + will be replaced. + ignore_warnings (bool): + If true, ignore safety checks when updating + the app profile. + """ + + app_profile = proto.Field(proto.MESSAGE, number=1, message=gba_instance.AppProfile,) + + update_mask = proto.Field(proto.MESSAGE, number=2, message=field_mask.FieldMask,) + + ignore_warnings = proto.Field(proto.BOOL, number=3) + + +class DeleteAppProfileRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.DeleteAppProfile. + + Attributes: + name (str): + Required. The unique name of the app profile to be deleted. + Values are of the form + ``projects/{project}/instances/{instance}/appProfiles/{app_profile}``. + ignore_warnings (bool): + Required. If true, ignore safety checks when + deleting the app profile. + """ + + name = proto.Field(proto.STRING, number=1) + + ignore_warnings = proto.Field(proto.BOOL, number=2) + + +class UpdateAppProfileMetadata(proto.Message): + r"""The metadata for the Operation returned by UpdateAppProfile.""" + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py b/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py new file mode 100644 index 000000000..ac146b798 --- /dev/null +++ b/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py @@ -0,0 +1,912 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.bigtable_admin_v2.types import common +from google.cloud.bigtable_admin_v2.types import table as gba_table +from google.protobuf import duration_pb2 as duration # type: ignore +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +__protobuf__ = proto.module( + package="google.bigtable.admin.v2", + manifest={ + "RestoreTableRequest", + "RestoreTableMetadata", + "OptimizeRestoredTableMetadata", + "CreateTableRequest", + "CreateTableFromSnapshotRequest", + "DropRowRangeRequest", + "ListTablesRequest", + "ListTablesResponse", + "GetTableRequest", + "DeleteTableRequest", + "ModifyColumnFamiliesRequest", + "GenerateConsistencyTokenRequest", + "GenerateConsistencyTokenResponse", + "CheckConsistencyRequest", + "CheckConsistencyResponse", + "SnapshotTableRequest", + "GetSnapshotRequest", + "ListSnapshotsRequest", + "ListSnapshotsResponse", + "DeleteSnapshotRequest", + "SnapshotTableMetadata", + "CreateTableFromSnapshotMetadata", + "CreateBackupRequest", + "CreateBackupMetadata", + "UpdateBackupRequest", + "GetBackupRequest", + "DeleteBackupRequest", + "ListBackupsRequest", + "ListBackupsResponse", + }, +) + + +class RestoreTableRequest(proto.Message): + r"""The request for + [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. + + Attributes: + parent (str): + Required. The name of the instance in which to create the + restored table. This instance must be the parent of the + source backup. Values are of the form + ``projects//instances/``. + table_id (str): + Required. The id of the table to create and restore to. This + table must not already exist. The ``table_id`` appended to + ``parent`` forms the full table name of the form + ``projects//instances//tables/``. + backup (str): + Name of the backup from which to restore. Values are of the + form + ``projects//instances//clusters//backups/``. + """ + + parent = proto.Field(proto.STRING, number=1) + + table_id = proto.Field(proto.STRING, number=2) + + backup = proto.Field(proto.STRING, number=3, oneof="source") + + +class RestoreTableMetadata(proto.Message): + r"""Metadata type for the long-running operation returned by + [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. + + Attributes: + name (str): + Name of the table being created and restored + to. + source_type (google.cloud.bigtable_admin_v2.types.RestoreSourceType): + The type of the restore source. + backup_info (google.cloud.bigtable_admin_v2.types.BackupInfo): + + optimize_table_operation_name (str): + If it exists, the name of the long-running operation that will + be used to track the post-restore optimization process to + optimize the performance of the restored table. The metadata + type of the long-running operation is + [OptimizeRestoredTableMetadata][]. The response type is + [Empty][google.protobuf.Empty]. This long-running operation + may be automatically created by the system if applicable + after the RestoreTable long-running operation completes + successfully.
This operation may not be created if the table + is already optimized or the restore was not successful. + progress (google.cloud.bigtable_admin_v2.types.OperationProgress): + The progress of the + [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable] + operation. + """ + + name = proto.Field(proto.STRING, number=1) + + source_type = proto.Field(proto.ENUM, number=2, enum=gba_table.RestoreSourceType,) + + backup_info = proto.Field( + proto.MESSAGE, number=3, oneof="source_info", message=gba_table.BackupInfo, + ) + + optimize_table_operation_name = proto.Field(proto.STRING, number=4) + + progress = proto.Field(proto.MESSAGE, number=5, message=common.OperationProgress,) + + +class OptimizeRestoredTableMetadata(proto.Message): + r"""Metadata type for the long-running operation used to track + the progress of optimizations performed on a newly restored + table. This long-running operation is automatically created by + the system after the successful completion of a table restore, + and cannot be cancelled. + + Attributes: + name (str): + Name of the restored table being optimized. + progress (google.cloud.bigtable_admin_v2.types.OperationProgress): + The progress of the post-restore + optimizations. + """ + + name = proto.Field(proto.STRING, number=1) + + progress = proto.Field(proto.MESSAGE, number=2, message=common.OperationProgress,) + + +class CreateTableRequest(proto.Message): + r"""Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] + + Attributes: + parent (str): + Required. The unique name of the instance in which to create + the table. Values are of the form + ``projects/{project}/instances/{instance}``. + table_id (str): + Required. The name by which the new table should be referred + to within the parent instance, e.g., ``foobar`` rather than + ``{parent}/tables/foobar``. Maximum 50 characters. + table (google.cloud.bigtable_admin_v2.types.Table): + Required. The Table to create. + initial_splits (Sequence[google.cloud.bigtable_admin_v2.types.CreateTableRequest.Split]): + The optional list of row keys that will be used to initially + split the table into several tablets (tablets are similar to + HBase regions). Given two split keys, ``s1`` and ``s2``, + three tablets will be created, spanning the key ranges: + ``[, s1), [s1, s2), [s2, )``. + + Example: + + - Row keys := + ``["a", "apple", "custom", "customer_1", "customer_2",`` + ``"other", "zz"]`` + - initial_split_keys := + ``["apple", "customer_1", "customer_2", "other"]`` + - Key assignment: + + - Tablet 1 ``[, apple) => {"a"}.`` + - Tablet 2 + ``[apple, customer_1) => {"apple", "custom"}.`` + - Tablet 3 + ``[customer_1, customer_2) => {"customer_1"}.`` + - Tablet 4 ``[customer_2, other) => {"customer_2"}.`` + - Tablet 5 ``[other, ) => {"other", "zz"}.`` + """ + + class Split(proto.Message): + r"""An initial split point for a newly created table. + + Attributes: + key (bytes): + Row key to use as an initial tablet boundary. 
+ """ + + key = proto.Field(proto.BYTES, number=1) + + parent = proto.Field(proto.STRING, number=1) + + table_id = proto.Field(proto.STRING, number=2) + + table = proto.Field(proto.MESSAGE, number=3, message=gba_table.Table,) + + initial_splits = proto.RepeatedField(proto.MESSAGE, number=4, message=Split,) + + +class CreateTableFromSnapshotRequest(proto.Message): + r"""Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot] + + Note: This is a private alpha release of Cloud Bigtable snapshots. + This feature is not currently available to most Cloud Bigtable + customers. This feature might be changed in backward-incompatible + ways and is not recommended for production use. It is not subject to + any SLA or deprecation policy. + + Attributes: + parent (str): + Required. The unique name of the instance in which to create + the table. Values are of the form + ``projects/{project}/instances/{instance}``. + table_id (str): + Required. The name by which the new table should be referred + to within the parent instance, e.g., ``foobar`` rather than + ``{parent}/tables/foobar``. + source_snapshot (str): + Required. The unique name of the snapshot from which to + restore the table. The snapshot and the table must be in the + same instance. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. + """ + + parent = proto.Field(proto.STRING, number=1) + + table_id = proto.Field(proto.STRING, number=2) + + source_snapshot = proto.Field(proto.STRING, number=3) + + +class DropRowRangeRequest(proto.Message): + r"""Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] + + Attributes: + name (str): + Required. The unique name of the table on which to drop a + range of rows. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + row_key_prefix (bytes): + Delete all rows that start with this row key + prefix. Prefix cannot be zero length. + delete_all_data_from_table (bool): + Delete all rows in the table. Setting this to + false is a no-op. + """ + + name = proto.Field(proto.STRING, number=1) + + row_key_prefix = proto.Field(proto.BYTES, number=2, oneof="target") + + delete_all_data_from_table = proto.Field(proto.BOOL, number=3, oneof="target") + + +class ListTablesRequest(proto.Message): + r"""Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] + + Attributes: + parent (str): + Required. The unique name of the instance for which tables + should be listed. Values are of the form + ``projects/{project}/instances/{instance}``. + view (google.cloud.bigtable_admin_v2.types.Table.View): + The view to be applied to the returned tables' fields. Only + NAME_ONLY view (default) and REPLICATION_VIEW are supported. + page_size (int): + Maximum number of results per page. + + A page_size of zero lets the server choose the number of + items to return. A page_size which is strictly positive will + return at most that many items. A negative page_size will + cause an error. + + Following the first request, subsequent paginated calls are + not required to pass a page_size. If a page_size is set in + subsequent calls, it must match the page_size given in the + first request. + page_token (str): + The value of ``next_page_token`` returned by a previous + call. 
+ """ + + parent = proto.Field(proto.STRING, number=1) + + view = proto.Field(proto.ENUM, number=2, enum=gba_table.Table.View,) + + page_size = proto.Field(proto.INT32, number=4) + + page_token = proto.Field(proto.STRING, number=3) + + +class ListTablesResponse(proto.Message): + r"""Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] + + Attributes: + tables (Sequence[google.cloud.bigtable_admin_v2.types.Table]): + The tables present in the requested instance. + next_page_token (str): + Set if not all tables could be returned in a single + response. Pass this value to ``page_token`` in another + request to get the next page of results. + """ + + @property + def raw_page(self): + return self + + tables = proto.RepeatedField(proto.MESSAGE, number=1, message=gba_table.Table,) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class GetTableRequest(proto.Message): + r"""Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.GetTable][google.bigtable.admin.v2.BigtableTableAdmin.GetTable] + + Attributes: + name (str): + Required. The unique name of the requested table. Values are + of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + view (google.cloud.bigtable_admin_v2.types.Table.View): + The view to be applied to the returned table's fields. + Defaults to ``SCHEMA_VIEW`` if unspecified. + """ + + name = proto.Field(proto.STRING, number=1) + + view = proto.Field(proto.ENUM, number=2, enum=gba_table.Table.View,) + + +class DeleteTableRequest(proto.Message): + r"""Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] + + Attributes: + name (str): + Required. The unique name of the table to be deleted. Values + are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + """ + + name = proto.Field(proto.STRING, number=1) + + +class ModifyColumnFamiliesRequest(proto.Message): + r"""Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies] + + Attributes: + name (str): + Required. The unique name of the table whose families should + be modified. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + modifications (Sequence[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest.Modification]): + Required. Modifications to be atomically + applied to the specified table's families. + Entries are applied in order, meaning that + earlier modifications can be masked by later + ones (in the case of repeated updates to the + same family, for example). + """ + + class Modification(proto.Message): + r"""A create, update, or delete of a particular column family. + + Attributes: + id (str): + The ID of the column family to be modified. + create (google.cloud.bigtable_admin_v2.types.ColumnFamily): + Create a new column family with the specified + schema, or fail if one already exists with the + given ID. + update (google.cloud.bigtable_admin_v2.types.ColumnFamily): + Update an existing column family to the + specified schema, or fail if no column family + exists with the given ID. + drop (bool): + Drop (delete) the column family with the + given ID, or fail if no such family exists. 
+ """ + + id = proto.Field(proto.STRING, number=1) + + create = proto.Field( + proto.MESSAGE, number=2, oneof="mod", message=gba_table.ColumnFamily, + ) + + update = proto.Field( + proto.MESSAGE, number=3, oneof="mod", message=gba_table.ColumnFamily, + ) + + drop = proto.Field(proto.BOOL, number=4, oneof="mod") + + name = proto.Field(proto.STRING, number=1) + + modifications = proto.RepeatedField(proto.MESSAGE, number=2, message=Modification,) + + +class GenerateConsistencyTokenRequest(proto.Message): + r"""Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] + + Attributes: + name (str): + Required. The unique name of the Table for which to create a + consistency token. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + """ + + name = proto.Field(proto.STRING, number=1) + + +class GenerateConsistencyTokenResponse(proto.Message): + r"""Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] + + Attributes: + consistency_token (str): + The generated consistency token. + """ + + consistency_token = proto.Field(proto.STRING, number=1) + + +class CheckConsistencyRequest(proto.Message): + r"""Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] + + Attributes: + name (str): + Required. The unique name of the Table for which to check + replication consistency. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + consistency_token (str): + Required. The token created using + GenerateConsistencyToken for the Table. + """ + + name = proto.Field(proto.STRING, number=1) + + consistency_token = proto.Field(proto.STRING, number=2) + + +class CheckConsistencyResponse(proto.Message): + r"""Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] + + Attributes: + consistent (bool): + True only if the token is consistent. A token + is consistent if replication has caught up with + the restrictions specified in the request. + """ + + consistent = proto.Field(proto.BOOL, number=1) + + +class SnapshotTableRequest(proto.Message): + r"""Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable] + + Note: This is a private alpha release of Cloud Bigtable snapshots. + This feature is not currently available to most Cloud Bigtable + customers. This feature might be changed in backward-incompatible + ways and is not recommended for production use. It is not subject to + any SLA or deprecation policy. + + Attributes: + name (str): + Required. The unique name of the table to have the snapshot + taken. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + cluster (str): + Required. The name of the cluster where the snapshot will be + created in. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + snapshot_id (str): + Required. The ID by which the new snapshot should be + referred to within the parent cluster, e.g., ``mysnapshot`` + of the form: ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` rather than + ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/mysnapshot``. 
+ ttl (google.protobuf.duration_pb2.Duration): + The amount of time that the new snapshot can + stay active after it is created. Once 'ttl' + expires, the snapshot will get deleted. The + maximum amount of time a snapshot can stay + active is 7 days. If 'ttl' is not specified, the + default value of 24 hours will be used. + description (str): + Description of the snapshot. + """ + + name = proto.Field(proto.STRING, number=1) + + cluster = proto.Field(proto.STRING, number=2) + + snapshot_id = proto.Field(proto.STRING, number=3) + + ttl = proto.Field(proto.MESSAGE, number=4, message=duration.Duration,) + + description = proto.Field(proto.STRING, number=5) + + +class GetSnapshotRequest(proto.Message): + r"""Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] + + Note: This is a private alpha release of Cloud Bigtable snapshots. + This feature is not currently available to most Cloud Bigtable + customers. This feature might be changed in backward-incompatible + ways and is not recommended for production use. It is not subject to + any SLA or deprecation policy. + + Attributes: + name (str): + Required. The unique name of the requested snapshot. Values + are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. + """ + + name = proto.Field(proto.STRING, number=1) + + +class ListSnapshotsRequest(proto.Message): + r"""Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] + + Note: This is a private alpha release of Cloud Bigtable snapshots. + This feature is not currently available to most Cloud Bigtable + customers. This feature might be changed in backward-incompatible + ways and is not recommended for production use. It is not subject to + any SLA or deprecation policy. + + Attributes: + parent (str): + Required. The unique name of the cluster for which snapshots + should be listed. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + Use ``{cluster} = '-'`` to list snapshots for all clusters + in an instance, e.g., + ``projects/{project}/instances/{instance}/clusters/-``. + page_size (int): + The maximum number of snapshots to return per + page. CURRENTLY UNIMPLEMENTED AND IGNORED. + page_token (str): + The value of ``next_page_token`` returned by a previous + call. + """ + + parent = proto.Field(proto.STRING, number=1) + + page_size = proto.Field(proto.INT32, number=2) + + page_token = proto.Field(proto.STRING, number=3) + + +class ListSnapshotsResponse(proto.Message): + r"""Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] + + Note: This is a private alpha release of Cloud Bigtable snapshots. + This feature is not currently available to most Cloud Bigtable + customers. This feature might be changed in backward-incompatible + ways and is not recommended for production use. It is not subject to + any SLA or deprecation policy. + + Attributes: + snapshots (Sequence[google.cloud.bigtable_admin_v2.types.Snapshot]): + The snapshots present in the requested + cluster. + next_page_token (str): + Set if not all snapshots could be returned in a single + response. Pass this value to ``page_token`` in another + request to get the next page of results. 
+ """ + + @property + def raw_page(self): + return self + + snapshots = proto.RepeatedField( + proto.MESSAGE, number=1, message=gba_table.Snapshot, + ) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class DeleteSnapshotRequest(proto.Message): + r"""Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot] + + Note: This is a private alpha release of Cloud Bigtable snapshots. + This feature is not currently available to most Cloud Bigtable + customers. This feature might be changed in backward-incompatible + ways and is not recommended for production use. It is not subject to + any SLA or deprecation policy. + + Attributes: + name (str): + Required. The unique name of the snapshot to be deleted. + Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. + """ + + name = proto.Field(proto.STRING, number=1) + + +class SnapshotTableMetadata(proto.Message): + r"""The metadata for the Operation returned by SnapshotTable. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to most Cloud + Bigtable customers. This feature might be changed in backward- + incompatible ways and is not recommended for production use. It + is not subject to any SLA or deprecation policy. + + Attributes: + original_request (google.cloud.bigtable_admin_v2.types.SnapshotTableRequest): + The request that prompted the initiation of + this SnapshotTable operation. + request_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the original request was + received. + finish_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the operation failed or was + completed successfully. + """ + + original_request = proto.Field( + proto.MESSAGE, number=1, message="SnapshotTableRequest", + ) + + request_time = proto.Field(proto.MESSAGE, number=2, message=timestamp.Timestamp,) + + finish_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,) + + +class CreateTableFromSnapshotMetadata(proto.Message): + r"""The metadata for the Operation returned by + CreateTableFromSnapshot. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to most Cloud + Bigtable customers. This feature might be changed in backward- + incompatible ways and is not recommended for production use. It + is not subject to any SLA or deprecation policy. + + Attributes: + original_request (google.cloud.bigtable_admin_v2.types.CreateTableFromSnapshotRequest): + The request that prompted the initiation of + this CreateTableFromSnapshot operation. + request_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the original request was + received. + finish_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the operation failed or was + completed successfully. + """ + + original_request = proto.Field( + proto.MESSAGE, number=1, message="CreateTableFromSnapshotRequest", + ) + + request_time = proto.Field(proto.MESSAGE, number=2, message=timestamp.Timestamp,) + + finish_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,) + + +class CreateBackupRequest(proto.Message): + r"""The request for + [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup]. + + Attributes: + parent (str): + Required. This must be one of the clusters in the instance + in which this table is located. 
The backup will be stored in + this cluster. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + backup_id (str): + Required. The id of the backup to be created. The + ``backup_id`` and ``parent`` are combined + as ``{parent}/backups/{backup_id}`` to create the full backup + name, of the form: + ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``. + This string must be between 1 and 50 characters in length + and match the regex ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + backup (google.cloud.bigtable_admin_v2.types.Backup): + Required. The backup to create. + """ + + parent = proto.Field(proto.STRING, number=1) + + backup_id = proto.Field(proto.STRING, number=2) + + backup = proto.Field(proto.MESSAGE, number=3, message=gba_table.Backup,) + + +class CreateBackupMetadata(proto.Message): + r"""Metadata type for the operation returned by + [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup]. + + Attributes: + name (str): + The name of the backup being created. + source_table (str): + The name of the table the backup is created + from. + start_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which this operation started. + end_time (google.protobuf.timestamp_pb2.Timestamp): + If set, the time at which this operation + finished or was cancelled. + """ + + name = proto.Field(proto.STRING, number=1) + + source_table = proto.Field(proto.STRING, number=2) + + start_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,) + + end_time = proto.Field(proto.MESSAGE, number=4, message=timestamp.Timestamp,) + + +class UpdateBackupRequest(proto.Message): + r"""The request for + [UpdateBackup][google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup]. + + Attributes: + backup (google.cloud.bigtable_admin_v2.types.Backup): + Required. The backup to update. ``backup.name`` and the + fields to be updated as specified by ``update_mask`` are + required. Other fields are ignored. Update is only supported + for the following fields: + + - ``backup.expire_time``. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. A mask specifying which fields (e.g. + ``expire_time``) in the Backup resource should be updated. + This mask is relative to the Backup resource, not to the + request message. The field mask must always be specified; + this prevents any future fields from being erased + accidentally by clients that do not know about them. + """ + + backup = proto.Field(proto.MESSAGE, number=1, message=gba_table.Backup,) + + update_mask = proto.Field(proto.MESSAGE, number=2, message=field_mask.FieldMask,) + + +class GetBackupRequest(proto.Message): + r"""The request for + [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. + + Attributes: + name (str): + Required. Name of the backup. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. + """ + + name = proto.Field(proto.STRING, number=1) + + +class DeleteBackupRequest(proto.Message): + r"""The request for + [DeleteBackup][google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup]. + + Attributes: + name (str): + Required. Name of the backup to delete. Values are of the + form + ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. + """ + + name = proto.Field(proto.STRING, number=1) + + +class ListBackupsRequest(proto.Message): + r"""The request for + [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups].
+ + Attributes: + parent (str): + Required. The cluster to list backups from. Values are of + the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + Use ``{cluster} = '-'`` to list backups for all clusters in + an instance, e.g., + ``projects/{project}/instances/{instance}/clusters/-``. + filter (str): + A filter expression that filters backups listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be <, >, <=, >=, !=, + =, or :. Colon ':' represents a HAS operator which is + roughly synonymous with equality. Filter rules are case + insensitive. + + The fields eligible for filtering are: + + - ``name`` + - ``source_table`` + - ``state`` + - ``start_time`` (and values are of the format + YYYY-MM-DDTHH:MM:SSZ) + - ``end_time`` (and values are of the format + YYYY-MM-DDTHH:MM:SSZ) + - ``expire_time`` (and values are of the format + YYYY-MM-DDTHH:MM:SSZ) + - ``size_bytes`` + + To filter on multiple expressions, provide each separate + expression within parentheses. By default, each expression + is an AND expression. However, you can include AND, OR, and + NOT expressions explicitly. + + Some examples of using filters are: + + - ``name:"exact"`` --> The backup's name is the string + "exact". + - ``name:howl`` --> The backup's name contains the string + "howl". + - ``source_table:prod`` --> The source_table's name + contains the string "prod". + - ``state:CREATING`` --> The backup is pending creation. + - ``state:READY`` --> The backup is fully created and ready + for use. + - ``(name:howl) AND (start_time < \"2018-03-28T14:50:00Z\")`` + --> The backup name contains the string "howl" and + start_time of the backup is before 2018-03-28T14:50:00Z. + - ``size_bytes > 10000000000`` --> The backup's size is + greater than 10GB. + order_by (str): + An expression for specifying the sort order of the results + of the request. The string value should specify one or more + fields in [Backup][google.bigtable.admin.v2.Backup]. The + full syntax is described at https://aip.dev/132#ordering. + + Fields supported are: \* name \* source_table \* expire_time + \* start_time \* end_time \* size_bytes \* state + + For example, "start_time". The default sorting order is + ascending. To specify descending order for the field, a + suffix " desc" should be appended to the field name. For + example, "start_time desc". Redundant space characters in + the syntax are insignificant. + + If order_by is empty, results will be sorted by + ``start_time`` in descending order starting from the most + recently created backup. + page_size (int): + Number of backups to be returned in the + response. If 0 or less, defaults to the server's + maximum allowed page size. + page_token (str): + If non-empty, ``page_token`` should contain a + [next_page_token][google.bigtable.admin.v2.ListBackupsResponse.next_page_token] + from a previous + [ListBackupsResponse][google.bigtable.admin.v2.ListBackupsResponse] + to the same ``parent`` and with the same ``filter``.
+ """ + + parent = proto.Field(proto.STRING, number=1) + + filter = proto.Field(proto.STRING, number=2) + + order_by = proto.Field(proto.STRING, number=3) + + page_size = proto.Field(proto.INT32, number=4) + + page_token = proto.Field(proto.STRING, number=5) + + +class ListBackupsResponse(proto.Message): + r"""The response for + [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. + + Attributes: + backups (Sequence[google.cloud.bigtable_admin_v2.types.Backup]): + The list of matching backups. + next_page_token (str): + ``next_page_token`` can be sent in a subsequent + [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups] + call to fetch more of the matching backups. + """ + + @property + def raw_page(self): + return self + + backups = proto.RepeatedField(proto.MESSAGE, number=1, message=gba_table.Backup,) + + next_page_token = proto.Field(proto.STRING, number=2) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/bigtable_admin_v2/types/common.py b/google/cloud/bigtable_admin_v2/types/common.py new file mode 100644 index 000000000..43d500dc0 --- /dev/null +++ b/google/cloud/bigtable_admin_v2/types/common.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +__protobuf__ = proto.module( + package="google.bigtable.admin.v2", manifest={"StorageType", "OperationProgress",}, +) + + +class StorageType(proto.Enum): + r"""Storage media types for persisting Bigtable data.""" + STORAGE_TYPE_UNSPECIFIED = 0 + SSD = 1 + HDD = 2 + + +class OperationProgress(proto.Message): + r"""Encapsulates progress related information for a Cloud + Bigtable long running operation. + + Attributes: + progress_percent (int): + Percent completion of the operation. + Values are between 0 and 100 inclusive. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Time the request was received. + end_time (google.protobuf.timestamp_pb2.Timestamp): + If set, the time at which this operation + failed or was completed successfully. + """ + + progress_percent = proto.Field(proto.INT32, number=1) + + start_time = proto.Field(proto.MESSAGE, number=2, message=timestamp.Timestamp,) + + end_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/bigtable_admin_v2/types/instance.py b/google/cloud/bigtable_admin_v2/types/instance.py new file mode 100644 index 000000000..ddef8a0d1 --- /dev/null +++ b/google/cloud/bigtable_admin_v2/types/instance.py @@ -0,0 +1,209 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.bigtable_admin_v2.types import common + + +__protobuf__ = proto.module( + package="google.bigtable.admin.v2", manifest={"Instance", "Cluster", "AppProfile",}, +) + + +class Instance(proto.Message): + r"""A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] + and the resources that serve them. All tables in an instance are + served from all [Clusters][google.bigtable.admin.v2.Cluster] in the + instance. + + Attributes: + name (str): + The unique name of the instance. Values are of the form + ``projects/{project}/instances/[a-z][a-z0-9\\-]+[a-z0-9]``. + display_name (str): + Required. The descriptive name for this + instance as it appears in UIs. Can be changed at + any time, but should be kept globally unique to + avoid confusion. + state (google.cloud.bigtable_admin_v2.types.Instance.State): + (``OutputOnly``) The current state of the instance. + type_ (google.cloud.bigtable_admin_v2.types.Instance.Type): + The type of the instance. Defaults to ``PRODUCTION``. + labels (Sequence[google.cloud.bigtable_admin_v2.types.Instance.LabelsEntry]): + Labels are a flexible and lightweight mechanism for + organizing cloud resources into groups that reflect a + customer's organizational needs and deployment strategies. + They can be used to filter resources and aggregate metrics. + + - Label keys must be between 1 and 63 characters long and + must conform to the regular expression: + ``[\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}``. + - Label values must be between 0 and 63 characters long and + must conform to the regular expression: + ``[\p{Ll}\p{Lo}\p{N}_-]{0,63}``. + - No more than 64 labels can be associated with a given + resource. + - Keys and values must both be under 128 bytes. + """ + + class State(proto.Enum): + r"""Possible states of an instance.""" + STATE_NOT_KNOWN = 0 + READY = 1 + CREATING = 2 + + class Type(proto.Enum): + r"""The type of the instance.""" + TYPE_UNSPECIFIED = 0 + PRODUCTION = 1 + DEVELOPMENT = 2 + + name = proto.Field(proto.STRING, number=1) + + display_name = proto.Field(proto.STRING, number=2) + + state = proto.Field(proto.ENUM, number=3, enum=State,) + + type_ = proto.Field(proto.ENUM, number=4, enum=Type,) + + labels = proto.MapField(proto.STRING, proto.STRING, number=5) + + +class Cluster(proto.Message): + r"""A resizable group of nodes in a particular cloud location, capable + of serving all [Tables][google.bigtable.admin.v2.Table] in the + parent [Instance][google.bigtable.admin.v2.Instance]. + + Attributes: + name (str): + The unique name of the cluster. Values are of the form + ``projects/{project}/instances/{instance}/clusters/[a-z][-a-z0-9]*``. + location (str): + (``CreationOnly``) The location where this cluster's nodes + and storage reside. For best performance, clients should be + located as close as possible to this cluster. Currently only + zones are supported, so values should be of the form + ``projects/{project}/locations/{zone}``. + state (google.cloud.bigtable_admin_v2.types.Cluster.State): + The current state of the cluster. + serve_nodes (int): + Required. 
The number of nodes allocated to + this cluster. More nodes enable higher + throughput and more consistent performance. + default_storage_type (google.cloud.bigtable_admin_v2.types.StorageType): + (``CreationOnly``) The type of storage used by this cluster + to serve its parent instance's tables, unless explicitly + overridden. + """ + + class State(proto.Enum): + r"""Possible states of a cluster.""" + STATE_NOT_KNOWN = 0 + READY = 1 + CREATING = 2 + RESIZING = 3 + DISABLED = 4 + + name = proto.Field(proto.STRING, number=1) + + location = proto.Field(proto.STRING, number=2) + + state = proto.Field(proto.ENUM, number=3, enum=State,) + + serve_nodes = proto.Field(proto.INT32, number=4) + + default_storage_type = proto.Field(proto.ENUM, number=5, enum=common.StorageType,) + + +class AppProfile(proto.Message): + r"""A configuration object describing how Cloud Bigtable should + treat traffic from a particular end user application. + + Attributes: + name (str): + (``OutputOnly``) The unique name of the app profile. Values + are of the form + ``projects//instances//appProfiles/[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + etag (str): + Strongly validated etag for optimistic concurrency control. + Preserve the value returned from ``GetAppProfile`` when + calling ``UpdateAppProfile`` to fail the request if there + has been a modification in the mean time. The + ``update_mask`` of the request need not include ``etag`` for + this protection to apply. See + `Wikipedia `__ and + `RFC + 7232 `__ + for more details. + description (str): + Optional long form description of the use + case for this AppProfile. + multi_cluster_routing_use_any (google.cloud.bigtable_admin_v2.types.AppProfile.MultiClusterRoutingUseAny): + Use a multi-cluster routing policy. + single_cluster_routing (google.cloud.bigtable_admin_v2.types.AppProfile.SingleClusterRouting): + Use a single-cluster routing policy. + """ + + class MultiClusterRoutingUseAny(proto.Message): + r"""Read/write requests are routed to the nearest cluster in the + instance, and will fail over to the nearest cluster that is + available in the event of transient errors or delays. Clusters + in a region are considered equidistant. Choosing this option + sacrifices read-your-writes consistency to improve availability. + """ + + class SingleClusterRouting(proto.Message): + r"""Unconditionally routes all read/write requests to a specific + cluster. This option preserves read-your-writes consistency but + does not improve availability. + + Attributes: + cluster_id (str): + The cluster to which read/write requests + should be routed. + allow_transactional_writes (bool): + Whether or not ``CheckAndMutateRow`` and + ``ReadModifyWriteRow`` requests are allowed by this app + profile. It is unsafe to send these requests to the same + table/row/column in multiple clusters. 
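# Illustrative sketch (not part of the patch): the two routing policies
# declared below share the "routing_policy" oneof, so assigning one clears
# the other. Resource names here are placeholders.
from google.cloud.bigtable_admin_v2.types import instance as instance_types

profile = instance_types.AppProfile(
    name="projects/p/instances/i/appProfiles/ap",
    single_cluster_routing=instance_types.AppProfile.SingleClusterRouting(
        cluster_id="c1",
        allow_transactional_writes=True,
    ),
)
profile.multi_cluster_routing_use_any = (
    instance_types.AppProfile.MultiClusterRoutingUseAny()
)
assert "single_cluster_routing" not in profile  # cleared by the oneof assignment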
+ """ + + cluster_id = proto.Field(proto.STRING, number=1) + + allow_transactional_writes = proto.Field(proto.BOOL, number=2) + + name = proto.Field(proto.STRING, number=1) + + etag = proto.Field(proto.STRING, number=2) + + description = proto.Field(proto.STRING, number=3) + + multi_cluster_routing_use_any = proto.Field( + proto.MESSAGE, + number=5, + oneof="routing_policy", + message=MultiClusterRoutingUseAny, + ) + + single_cluster_routing = proto.Field( + proto.MESSAGE, number=6, oneof="routing_policy", message=SingleClusterRouting, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/bigtable_admin_v2/types/table.py b/google/cloud/bigtable_admin_v2/types/table.py new file mode 100644 index 000000000..96d7750f7 --- /dev/null +++ b/google/cloud/bigtable_admin_v2/types/table.py @@ -0,0 +1,376 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.protobuf import duration_pb2 as duration # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +__protobuf__ = proto.module( + package="google.bigtable.admin.v2", + manifest={ + "RestoreSourceType", + "RestoreInfo", + "Table", + "ColumnFamily", + "GcRule", + "Snapshot", + "Backup", + "BackupInfo", + }, +) + + +class RestoreSourceType(proto.Enum): + r"""Indicates the type of the restore source.""" + RESTORE_SOURCE_TYPE_UNSPECIFIED = 0 + BACKUP = 1 + + +class RestoreInfo(proto.Message): + r"""Information about a table restore. + + Attributes: + source_type (google.cloud.bigtable_admin_v2.types.RestoreSourceType): + The type of the restore source. + backup_info (google.cloud.bigtable_admin_v2.types.BackupInfo): + Information about the backup used to restore + the table. The backup may no longer exist. + """ + + source_type = proto.Field(proto.ENUM, number=1, enum="RestoreSourceType",) + + backup_info = proto.Field( + proto.MESSAGE, number=2, oneof="source_info", message="BackupInfo", + ) + + +class Table(proto.Message): + r"""A collection of user data indexed by row, column, and + timestamp. Each table is served using the resources of its + parent cluster. + + Attributes: + name (str): + Output only. The unique name of the table. Values are of the + form + ``projects//instances//tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + Views: ``NAME_ONLY``, ``SCHEMA_VIEW``, ``REPLICATION_VIEW``, + ``FULL`` + cluster_states (Sequence[google.cloud.bigtable_admin_v2.types.Table.ClusterStatesEntry]): + Output only. Map from cluster ID to per-cluster table state. + If it could not be determined whether or not the table has + data in a particular cluster (for example, if its zone is + unavailable), then there will be an entry for the cluster + with UNKNOWN ``replication_status``. Views: + ``REPLICATION_VIEW``, ``FULL`` + column_families (Sequence[google.cloud.bigtable_admin_v2.types.Table.ColumnFamiliesEntry]): + (``CreationOnly``) The column families configured for this + table, mapped by column family ID. 
Views: ``SCHEMA_VIEW``, + ``FULL`` + granularity (google.cloud.bigtable_admin_v2.types.Table.TimestampGranularity): + (``CreationOnly``) The granularity (i.e. ``MILLIS``) at + which timestamps are stored in this table. Timestamps not + matching the granularity will be rejected. If unspecified at + creation time, the value will be set to ``MILLIS``. Views: + ``SCHEMA_VIEW``, ``FULL``. + restore_info (google.cloud.bigtable_admin_v2.types.RestoreInfo): + Output only. If this table was restored from + another data source (e.g. a backup), this field + will be populated with information about the + restore. + """ + + class TimestampGranularity(proto.Enum): + r"""Possible timestamp granularities to use when keeping multiple + versions of data in a table. + """ + TIMESTAMP_GRANULARITY_UNSPECIFIED = 0 + MILLIS = 1 + + class View(proto.Enum): + r"""Defines a view over a table's fields.""" + VIEW_UNSPECIFIED = 0 + NAME_ONLY = 1 + SCHEMA_VIEW = 2 + REPLICATION_VIEW = 3 + FULL = 4 + + class ClusterState(proto.Message): + r"""The state of a table's data in a particular cluster. + + Attributes: + replication_state (google.cloud.bigtable_admin_v2.types.Table.ClusterState.ReplicationState): + Output only. The state of replication for the + table in this cluster. + """ + + class ReplicationState(proto.Enum): + r"""Table replication states.""" + STATE_NOT_KNOWN = 0 + INITIALIZING = 1 + PLANNED_MAINTENANCE = 2 + UNPLANNED_MAINTENANCE = 3 + READY = 4 + READY_OPTIMIZING = 5 + + replication_state = proto.Field( + proto.ENUM, number=1, enum="Table.ClusterState.ReplicationState", + ) + + name = proto.Field(proto.STRING, number=1) + + cluster_states = proto.MapField( + proto.STRING, proto.MESSAGE, number=2, message=ClusterState, + ) + + column_families = proto.MapField( + proto.STRING, proto.MESSAGE, number=3, message="ColumnFamily", + ) + + granularity = proto.Field(proto.ENUM, number=4, enum=TimestampGranularity,) + + restore_info = proto.Field(proto.MESSAGE, number=6, message="RestoreInfo",) + + +class ColumnFamily(proto.Message): + r"""A set of columns within a table which share a common + configuration. + + Attributes: + gc_rule (google.cloud.bigtable_admin_v2.types.GcRule): + Garbage collection rule specified as a + protobuf. Must serialize to at most 500 bytes. + NOTE: Garbage collection executes + opportunistically in the background, and so it's + possible for reads to return a cell even if it + matches the active GC expression for its family. + """ + + gc_rule = proto.Field(proto.MESSAGE, number=1, message="GcRule",) + + +class GcRule(proto.Message): + r"""Rule for determining which cells to delete during garbage + collection. + + Attributes: + max_num_versions (int): + Delete all cells in a column except the most + recent N. + max_age (google.protobuf.duration_pb2.Duration): + Delete cells in a column older than the given + age. Values must be at least one millisecond, + and will be truncated to microsecond + granularity. + intersection (google.cloud.bigtable_admin_v2.types.GcRule.Intersection): + Delete cells that would be deleted by every + nested rule. + union (google.cloud.bigtable_admin_v2.types.GcRule.Union): + Delete cells that would be deleted by any + nested rule. + """ + + class Intersection(proto.Message): + r"""A GcRule which deletes cells matching all of the given rules. + + Attributes: + rules (Sequence[google.cloud.bigtable_admin_v2.types.GcRule]): + Only delete cells which would be deleted by every element of + ``rules``. 
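# Illustrative sketch (not part of the patch): nesting the GcRule oneof to
# keep at most three versions AND nothing older than seven days; duration_pb2
# matches the max_age field type declared below.
from google.protobuf import duration_pb2

from google.cloud.bigtable_admin_v2.types import table as table_types

rule = table_types.GcRule(
    intersection=table_types.GcRule.Intersection(
        rules=[
            table_types.GcRule(max_num_versions=3),
            table_types.GcRule(max_age=duration_pb2.Duration(seconds=7 * 24 * 3600)),
        ],
    ),
)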
+ """ + + rules = proto.RepeatedField(proto.MESSAGE, number=1, message="GcRule",) + + class Union(proto.Message): + r"""A GcRule which deletes cells matching any of the given rules. + + Attributes: + rules (Sequence[google.cloud.bigtable_admin_v2.types.GcRule]): + Delete cells which would be deleted by any element of + ``rules``. + """ + + rules = proto.RepeatedField(proto.MESSAGE, number=1, message="GcRule",) + + max_num_versions = proto.Field(proto.INT32, number=1, oneof="rule") + + max_age = proto.Field( + proto.MESSAGE, number=2, oneof="rule", message=duration.Duration, + ) + + intersection = proto.Field( + proto.MESSAGE, number=3, oneof="rule", message=Intersection, + ) + + union = proto.Field(proto.MESSAGE, number=4, oneof="rule", message=Union,) + + +class Snapshot(proto.Message): + r"""A snapshot of a table at a particular time. A snapshot can be + used as a checkpoint for data restoration or a data source for a + new table. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to most Cloud + Bigtable customers. This feature might be changed in backward- + incompatible ways and is not recommended for production use. It + is not subject to any SLA or deprecation policy. + + Attributes: + name (str): + Output only. The unique name of the snapshot. Values are of + the form + ``projects//instances//clusters//snapshots/``. + source_table (google.cloud.bigtable_admin_v2.types.Table): + Output only. The source table at the time the + snapshot was taken. + data_size_bytes (int): + Output only. The size of the data in the + source table at the time the snapshot was taken. + In some cases, this value may be computed + asynchronously via a background process and a + placeholder of 0 will be used in the meantime. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The time when the snapshot is + created. + delete_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The time when the snapshot will + be deleted. The maximum amount of time a + snapshot can stay active is 365 days. If 'ttl' + is not specified, the default maximum of 365 + days will be used. + state (google.cloud.bigtable_admin_v2.types.Snapshot.State): + Output only. The current state of the + snapshot. + description (str): + Output only. Description of the snapshot. + """ + + class State(proto.Enum): + r"""Possible states of a snapshot.""" + STATE_NOT_KNOWN = 0 + READY = 1 + CREATING = 2 + + name = proto.Field(proto.STRING, number=1) + + source_table = proto.Field(proto.MESSAGE, number=2, message="Table",) + + data_size_bytes = proto.Field(proto.INT64, number=3) + + create_time = proto.Field(proto.MESSAGE, number=4, message=timestamp.Timestamp,) + + delete_time = proto.Field(proto.MESSAGE, number=5, message=timestamp.Timestamp,) + + state = proto.Field(proto.ENUM, number=6, enum=State,) + + description = proto.Field(proto.STRING, number=7) + + +class Backup(proto.Message): + r"""A backup of a Cloud Bigtable table. + + Attributes: + name (str): + Output only. A globally unique identifier for the backup + which cannot be changed. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/ backups/[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` + The final segment of the name must be between 1 and 50 + characters in length. + + The backup is stored in the cluster identified by the prefix + of the backup name of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + source_table (str): + Required. Immutable. 
Name of the table from which this + backup was created. This needs to be in the same instance as + the backup. Values are of the form + ``projects/{project}/instances/{instance}/tables/{source_table}``. + expire_time (google.protobuf.timestamp_pb2.Timestamp): + Required. The expiration time of the backup, with + microseconds granularity that must be at least 6 hours and + at most 30 days from the time the request is received. Once + the ``expire_time`` has passed, Cloud Bigtable will delete + the backup and free the resources used by the backup. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. ``start_time`` is the time that the backup was + started (i.e. approximately the time the + [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup] + request is received). The row data in this backup will be no + older than this timestamp. + end_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. ``end_time`` is the time that the backup was + finished. The row data in the backup will be no newer than + this timestamp. + size_bytes (int): + Output only. Size of the backup in bytes. + state (google.cloud.bigtable_admin_v2.types.Backup.State): + Output only. The current state of the backup. + """ + + class State(proto.Enum): + r"""Indicates the current state of the backup.""" + STATE_UNSPECIFIED = 0 + CREATING = 1 + READY = 2 + + name = proto.Field(proto.STRING, number=1) + + source_table = proto.Field(proto.STRING, number=2) + + expire_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,) + + start_time = proto.Field(proto.MESSAGE, number=4, message=timestamp.Timestamp,) + + end_time = proto.Field(proto.MESSAGE, number=5, message=timestamp.Timestamp,) + + size_bytes = proto.Field(proto.INT64, number=6) + + state = proto.Field(proto.ENUM, number=7, enum=State,) + + +class BackupInfo(proto.Message): + r"""Information about a backup. + + Attributes: + backup (str): + Output only. Name of the backup. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The time that the backup was + started. Row data in the backup will be no older + than this timestamp. + end_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. This time that the backup was + finished. Row data in the backup will be no + newer than this timestamp. + source_table (str): + Output only. Name of the table the backup was + created from. + """ + + backup = proto.Field(proto.STRING, number=1) + + start_time = proto.Field(proto.MESSAGE, number=2, message=timestamp.Timestamp,) + + end_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,) + + source_table = proto.Field(proto.STRING, number=4) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/bigtable_v2/__init__.py b/google/cloud/bigtable_v2/__init__.py index 8c31017cc..0ab15791b 100644 --- a/google/cloud/bigtable_v2/__init__.py +++ b/google/cloud/bigtable_v2/__init__.py @@ -1,42 +1,71 @@ # -*- coding: utf-8 -*- -# + # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# https://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
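# Illustrative sketch (not part of the patch): populating a Backup with a
# seven-day expiry, which satisfies the 6-hour/30-day window described in the
# expire_time docstring above; the table name is a placeholder.
import datetime

from google.protobuf import timestamp_pb2

from google.cloud.bigtable_admin_v2.types import table as table_types

expire_time = timestamp_pb2.Timestamp()
expire_time.FromDatetime(datetime.datetime.utcnow() + datetime.timedelta(days=7))
backup = table_types.Backup(
    source_table="projects/p/instances/i/tables/t",
    expire_time=expire_time,
)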
# See the License for the specific language governing permissions and # limitations under the License. +# - -from __future__ import absolute_import -import sys -import warnings - -from google.cloud.bigtable_v2 import types -from google.cloud.bigtable_v2.gapic import bigtable_client - - -if sys.version_info[:2] == (2, 7): - message = ( - "A future version of this library will drop support for Python 2.7. " - "More details about Python 2 support for Google Cloud Client Libraries " - "can be found at https://cloud.google.com/python/docs/python2-sunset/" - ) - warnings.warn(message, DeprecationWarning) - - -class BigtableClient(bigtable_client.BigtableClient): - __doc__ = bigtable_client.BigtableClient.__doc__ +from .services.bigtable import BigtableClient +from .types.bigtable import CheckAndMutateRowRequest +from .types.bigtable import CheckAndMutateRowResponse +from .types.bigtable import MutateRowRequest +from .types.bigtable import MutateRowResponse +from .types.bigtable import MutateRowsRequest +from .types.bigtable import MutateRowsResponse +from .types.bigtable import ReadModifyWriteRowRequest +from .types.bigtable import ReadModifyWriteRowResponse +from .types.bigtable import ReadRowsRequest +from .types.bigtable import ReadRowsResponse +from .types.bigtable import SampleRowKeysRequest +from .types.bigtable import SampleRowKeysResponse +from .types.data import Cell +from .types.data import Column +from .types.data import ColumnRange +from .types.data import Family +from .types.data import Mutation +from .types.data import ReadModifyWriteRule +from .types.data import Row +from .types.data import RowFilter +from .types.data import RowRange +from .types.data import RowSet +from .types.data import TimestampRange +from .types.data import ValueRange __all__ = ( - "types", + "Cell", + "CheckAndMutateRowRequest", + "CheckAndMutateRowResponse", + "Column", + "ColumnRange", + "Family", + "MutateRowRequest", + "MutateRowResponse", + "MutateRowsRequest", + "MutateRowsResponse", + "Mutation", + "ReadModifyWriteRowRequest", + "ReadModifyWriteRowResponse", + "ReadModifyWriteRule", + "ReadRowsRequest", + "ReadRowsResponse", + "Row", + "RowFilter", + "RowRange", + "RowSet", + "SampleRowKeysRequest", + "SampleRowKeysResponse", + "TimestampRange", + "ValueRange", "BigtableClient", ) diff --git a/google/cloud/bigtable_v2/gapic/__init__.py b/google/cloud/bigtable_v2/gapic/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/google/cloud/bigtable_v2/gapic/bigtable_client.py b/google/cloud/bigtable_v2/gapic/bigtable_client.py deleted file mode 100644 index a9ddfad8a..000000000 --- a/google/cloud/bigtable_v2/gapic/bigtable_client.py +++ /dev/null @@ -1,779 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
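# Illustrative sketch (not part of the patch): with the rewritten __init__.py
# above, the handwritten wrapper class is gone and both the client and the
# request/response types import directly from google.cloud.bigtable_v2. The
# table name is a placeholder, and constructing the client assumes default
# credentials are available.
from google.cloud import bigtable_v2

client = bigtable_v2.BigtableClient()
request = bigtable_v2.ReadRowsRequest(
    table_name="projects/p/instances/i/tables/t",
    rows_limit=10,
)
for response in client.read_rows(request=request):
    pass  # each ReadRowsResponse carries a batch of row/cell chunks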
- -"""Accesses the google.bigtable.v2 Bigtable API.""" - -import pkg_resources -import warnings - -from google.oauth2 import service_account -import google.api_core.client_options -import google.api_core.gapic_v1.client_info -import google.api_core.gapic_v1.config -import google.api_core.gapic_v1.method -import google.api_core.gapic_v1.routing_header -import google.api_core.grpc_helpers -import google.api_core.path_template -import grpc - -from google.cloud.bigtable_v2.gapic import bigtable_client_config -from google.cloud.bigtable_v2.gapic.transports import bigtable_grpc_transport -from google.cloud.bigtable_v2.proto import bigtable_pb2 -from google.cloud.bigtable_v2.proto import bigtable_pb2_grpc -from google.cloud.bigtable_v2.proto import data_pb2 - - -try: - _GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( - "google-cloud-bigtable" - ).version -except pkg_resources.DistributionNotFound: # pragma: NO COVER - _GAPIC_LIBRARY_VERSION = None - - -class BigtableClient(object): - """Service for reading from and writing to existing Bigtable tables.""" - - SERVICE_ADDRESS = "bigtable.googleapis.com:443" - """The default address of the service.""" - - # The name of the interface for this client. This is the key used to - # find the method configuration in the client_config dictionary. - _INTERFACE_NAME = "google.bigtable.v2.Bigtable" - - @classmethod - def from_service_account_file(cls, filename, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - BigtableClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file(filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @classmethod - def table_path(cls, project, instance, table): - """Return a fully-qualified table string.""" - return google.api_core.path_template.expand( - "projects/{project}/instances/{instance}/tables/{table}", - project=project, - instance=instance, - table=table, - ) - - def __init__( - self, - transport=None, - channel=None, - credentials=None, - client_config=None, - client_info=None, - client_options=None, - ): - """Constructor. - - Args: - transport (Union[~.BigtableGrpcTransport, - Callable[[~.Credentials, type], ~.BigtableGrpcTransport]): A transport - instance, responsible for actually making the API calls. - The default transport uses the gRPC protocol. - This argument may also be a callable which returns a - transport instance. Callables will be sent the credentials - as the first argument and the default transport class as - the second argument. - channel (grpc.Channel): DEPRECATED. A ``Channel`` instance - through which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is mutually exclusive with providing a - transport instance to ``transport``; doing so will raise - an exception. - client_config (dict): DEPRECATED. A dictionary of call options for - each method. 
If not specified, the default configuration is used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - client_options (Union[dict, google.api_core.client_options.ClientOptions]): - Client options used to set user options on the client. API Endpoint - should be set through client_options. - """ - # Raise deprecation warnings for things we want to go away. - if client_config is not None: - warnings.warn( - "The `client_config` argument is deprecated.", - PendingDeprecationWarning, - stacklevel=2, - ) - else: - client_config = bigtable_client_config.config - - if channel: - warnings.warn( - "The `channel` argument is deprecated; use " "`transport` instead.", - PendingDeprecationWarning, - stacklevel=2, - ) - - api_endpoint = self.SERVICE_ADDRESS - if client_options: - if type(client_options) == dict: - client_options = google.api_core.client_options.from_dict( - client_options - ) - if client_options.api_endpoint: - api_endpoint = client_options.api_endpoint - - # Instantiate the transport. - # The transport is responsible for handling serialization and - # deserialization and actually sending data to the service. - if transport: - if callable(transport): - self.transport = transport( - credentials=credentials, - default_class=bigtable_grpc_transport.BigtableGrpcTransport, - address=api_endpoint, - ) - else: - if credentials: - raise ValueError( - "Received both a transport instance and " - "credentials; these are mutually exclusive." - ) - self.transport = transport - else: - self.transport = bigtable_grpc_transport.BigtableGrpcTransport( - address=api_endpoint, - channel=channel, - credentials=credentials, - ) - - if client_info is None: - client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION, - ) - else: - client_info.gapic_version = _GAPIC_LIBRARY_VERSION - self._client_info = client_info - - # Parse out the default settings for retry and timeout for each RPC - # from the client configuration. - # (Ordinarily, these are the defaults specified in the `*_config.py` - # file next to this one.) - self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME], - ) - - # Save a dictionary of cached API call functions. - # These are the actual callables which invoke the proper - # transport methods, wrapped with `wrap_method` to add retry, - # timeout, and the like. - self._inner_api_calls = {} - - # Service calls - def read_rows( - self, - table_name, - app_profile_id=None, - rows=None, - filter_=None, - rows_limit=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Streams back the contents of all requested rows in key order, optionally - applying the same Reader filter to each. Depending on their size, - rows and cells may be broken up across multiple responses, but - atomicity of each row will still be preserved. See the - ReadRowsResponse documentation for details. - - Example: - >>> from google.cloud import bigtable_v2 - >>> - >>> client = bigtable_v2.BigtableClient() - >>> - >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> for element in client.read_rows(table_name): - ... # process element - ... 
pass - - Args: - table_name (str): Required. The unique name of the table from which to read. Values - are of the form - ``projects/<project>/instances/<instance>/tables/<table>
``. - app_profile_id (str): This value specifies routing for replication. If not specified, the - "default" application profile will be used. - rows (Union[dict, ~google.cloud.bigtable_v2.types.RowSet]): The row keys and/or ranges to read. If not specified, reads from all rows. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_v2.types.RowSet` - filter_ (Union[dict, ~google.cloud.bigtable_v2.types.RowFilter]): The filter to apply to the contents of the specified row(s). If unset, - reads the entirety of each row. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_v2.types.RowFilter` - rows_limit (long): The read will terminate after committing to N rows' worth of results. The - default (zero) is to return all results. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - Iterable[~google.cloud.bigtable_v2.types.ReadRowsResponse]. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "read_rows" not in self._inner_api_calls: - self._inner_api_calls[ - "read_rows" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.read_rows, - default_retry=self._method_configs["ReadRows"].retry, - default_timeout=self._method_configs["ReadRows"].timeout, - client_info=self._client_info, - ) - - request = bigtable_pb2.ReadRowsRequest( - table_name=table_name, - app_profile_id=app_profile_id, - rows=rows, - filter=filter_, - rows_limit=rows_limit, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("table_name", table_name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["read_rows"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def sample_row_keys( - self, - table_name, - app_profile_id=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Returns a sample of row keys in the table. The returned row keys will - delimit contiguous sections of the table of approximately equal size, - which can be used to break up the data for distributed tasks like - mapreduces. - - Example: - >>> from google.cloud import bigtable_v2 - >>> - >>> client = bigtable_v2.BigtableClient() - >>> - >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> for element in client.sample_row_keys(table_name): - ... # process element - ... pass - - Args: - table_name (str): Required. The unique name of the table from which to sample row - keys. Values are of the form - ``projects//instances//tables/
``. - app_profile_id (str): This value specifies routing for replication. If not specified, the - "default" application profile will be used. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - Iterable[~google.cloud.bigtable_v2.types.SampleRowKeysResponse]. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "sample_row_keys" not in self._inner_api_calls: - self._inner_api_calls[ - "sample_row_keys" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.sample_row_keys, - default_retry=self._method_configs["SampleRowKeys"].retry, - default_timeout=self._method_configs["SampleRowKeys"].timeout, - client_info=self._client_info, - ) - - request = bigtable_pb2.SampleRowKeysRequest( - table_name=table_name, - app_profile_id=app_profile_id, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("table_name", table_name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["sample_row_keys"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def mutate_row( - self, - table_name, - row_key, - mutations, - app_profile_id=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Mutates a row atomically. Cells already present in the row are left - unchanged unless explicitly changed by ``mutation``. - - Example: - >>> from google.cloud import bigtable_v2 - >>> - >>> client = bigtable_v2.BigtableClient() - >>> - >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> # TODO: Initialize `row_key`: - >>> row_key = b'' - >>> - >>> # TODO: Initialize `mutations`: - >>> mutations = [] - >>> - >>> response = client.mutate_row(table_name, row_key, mutations) - - Args: - table_name (str): Required. The unique name of the table to which the mutation should - be applied. Values are of the form - ``projects//instances//tables/
``. - row_key (bytes): Required. The key of the row to which the mutation should be applied. - mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Required. Changes to be atomically applied to the specified row. Entries are applied - in order, meaning that earlier mutations can be masked by later ones. - Must contain at least one entry and at most 100000. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_v2.types.Mutation` - app_profile_id (str): This value specifies routing for replication. If not specified, the - "default" application profile will be used. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_v2.types.MutateRowResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "mutate_row" not in self._inner_api_calls: - self._inner_api_calls[ - "mutate_row" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.mutate_row, - default_retry=self._method_configs["MutateRow"].retry, - default_timeout=self._method_configs["MutateRow"].timeout, - client_info=self._client_info, - ) - - request = bigtable_pb2.MutateRowRequest( - table_name=table_name, - row_key=row_key, - mutations=mutations, - app_profile_id=app_profile_id, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("table_name", table_name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["mutate_row"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def mutate_rows( - self, - table_name, - entries, - app_profile_id=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Mutates multiple rows in a batch. Each individual row is mutated - atomically as in MutateRow, but the entire batch is not executed - atomically. - - Example: - >>> from google.cloud import bigtable_v2 - >>> - >>> client = bigtable_v2.BigtableClient() - >>> - >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> # TODO: Initialize `entries`: - >>> entries = [] - >>> - >>> for element in client.mutate_rows(table_name, entries): - ... # process element - ... pass - - Args: - table_name (str): Required. The unique name of the table to which the mutations should be applied. - entries (list[Union[dict, ~google.cloud.bigtable_v2.types.Entry]]): Required. The row keys and corresponding mutations to be applied in bulk. - Each entry is applied as an atomic mutation, but the entries may be - applied in arbitrary order (even between entries for the same row). 
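# Illustrative sketch (not part of the patch): the same semantics on the new
# surface; each MutateRowsRequest.Entry is applied atomically, but the batch
# as a whole is not. Names are placeholders, and timestamp_micros=-1 asks the
# server to assign the cell timestamp.
from google.cloud import bigtable_v2

entry = bigtable_v2.MutateRowsRequest.Entry(
    row_key=b"row-1",
    mutations=[
        bigtable_v2.Mutation(
            set_cell=bigtable_v2.Mutation.SetCell(
                family_name="cf1",
                column_qualifier=b"greeting",
                timestamp_micros=-1,
                value=b"hello world",
            )
        )
    ],
)
request = bigtable_v2.MutateRowsRequest(
    table_name="projects/p/instances/i/tables/t",
    entries=[entry],
)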
- At least one entry must be specified, and in total the entries can - contain at most 100000 mutations. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_v2.types.Entry` - app_profile_id (str): This value specifies routing for replication. If not specified, the - "default" application profile will be used. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - Iterable[~google.cloud.bigtable_v2.types.MutateRowsResponse]. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "mutate_rows" not in self._inner_api_calls: - self._inner_api_calls[ - "mutate_rows" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.mutate_rows, - default_retry=self._method_configs["MutateRows"].retry, - default_timeout=self._method_configs["MutateRows"].timeout, - client_info=self._client_info, - ) - - request = bigtable_pb2.MutateRowsRequest( - table_name=table_name, - entries=entries, - app_profile_id=app_profile_id, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("table_name", table_name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["mutate_rows"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def check_and_mutate_row( - self, - table_name, - row_key, - app_profile_id=None, - predicate_filter=None, - true_mutations=None, - false_mutations=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Mutates a row atomically based on the output of a predicate Reader filter. - - Example: - >>> from google.cloud import bigtable_v2 - >>> - >>> client = bigtable_v2.BigtableClient() - >>> - >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> # TODO: Initialize `row_key`: - >>> row_key = b'' - >>> - >>> response = client.check_and_mutate_row(table_name, row_key) - - Args: - table_name (str): Required. The unique name of the table to which the conditional - mutation should be applied. Values are of the form - ``projects//instances//tables/
``. - row_key (bytes): Required. The key of the row to which the conditional mutation should be applied. - app_profile_id (str): This value specifies routing for replication. If not specified, the - "default" application profile will be used. - predicate_filter (Union[dict, ~google.cloud.bigtable_v2.types.RowFilter]): The filter to be applied to the contents of the specified row. - Depending on whether or not any results are yielded, either - ``true_mutations`` or ``false_mutations`` will be executed. If unset, - checks that the row contains any values at all. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_v2.types.RowFilter` - true_mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Changes to be atomically applied to the specified row if - ``predicate_filter`` yields at least one cell when applied to - ``row_key``. Entries are applied in order, meaning that earlier - mutations can be masked by later ones. Must contain at least one entry - if ``false_mutations`` is empty, and at most 100000. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_v2.types.Mutation` - false_mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Changes to be atomically applied to the specified row if - ``predicate_filter`` does not yield any cells when applied to - ``row_key``. Entries are applied in order, meaning that earlier - mutations can be masked by later ones. Must contain at least one entry - if ``true_mutations`` is empty, and at most 100000. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_v2.types.Mutation` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_v2.types.CheckAndMutateRowResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
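# Illustrative sketch (not part of the patch): overriding the defaults being
# wired up here at call time. Per the docstrings above, `timeout` applies to
# each individual attempt once `retry` is given. The arguments are schematic
# placeholders against the pre-change surface.
from google.api_core import exceptions
from google.api_core.retry import Retry, if_exception_type
from google.cloud import bigtable_v2

client = bigtable_v2.BigtableClient()
response = client.check_and_mutate_row(
    "projects/p/instances/i/tables/t",
    b"row-key",
    retry=Retry(
        predicate=if_exception_type(exceptions.ServiceUnavailable),
        initial=0.1,
        maximum=5.0,
        deadline=60.0,
    ),
    timeout=10.0,  # per-attempt, because retry is specified
)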
- if "check_and_mutate_row" not in self._inner_api_calls: - self._inner_api_calls[ - "check_and_mutate_row" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.check_and_mutate_row, - default_retry=self._method_configs["CheckAndMutateRow"].retry, - default_timeout=self._method_configs["CheckAndMutateRow"].timeout, - client_info=self._client_info, - ) - - request = bigtable_pb2.CheckAndMutateRowRequest( - table_name=table_name, - row_key=row_key, - app_profile_id=app_profile_id, - predicate_filter=predicate_filter, - true_mutations=true_mutations, - false_mutations=false_mutations, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("table_name", table_name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["check_and_mutate_row"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def read_modify_write_row( - self, - table_name, - row_key, - rules, - app_profile_id=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Modifies a row atomically on the server. The method reads the latest - existing timestamp and value from the specified columns and writes a new - entry based on pre-defined read/modify/write rules. The new value for the - timestamp is the greater of the existing timestamp or the current server - time. The method returns the new contents of all modified cells. - - Example: - >>> from google.cloud import bigtable_v2 - >>> - >>> client = bigtable_v2.BigtableClient() - >>> - >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> # TODO: Initialize `row_key`: - >>> row_key = b'' - >>> - >>> # TODO: Initialize `rules`: - >>> rules = [] - >>> - >>> response = client.read_modify_write_row(table_name, row_key, rules) - - Args: - table_name (str): Required. The unique name of the table to which the - read/modify/write rules should be applied. Values are of the form - ``projects//instances//tables/
``. - row_key (bytes): Required. The key of the row to which the read/modify/write rules should be applied. - rules (list[Union[dict, ~google.cloud.bigtable_v2.types.ReadModifyWriteRule]]): Required. Rules specifying how the specified row's contents are to be transformed - into writes. Entries are applied in order, meaning that earlier rules will - affect the results of later ones. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_v2.types.ReadModifyWriteRule` - app_profile_id (str): This value specifies routing for replication. If not specified, the - "default" application profile will be used. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_v2.types.ReadModifyWriteRowResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "read_modify_write_row" not in self._inner_api_calls: - self._inner_api_calls[ - "read_modify_write_row" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.read_modify_write_row, - default_retry=self._method_configs["ReadModifyWriteRow"].retry, - default_timeout=self._method_configs["ReadModifyWriteRow"].timeout, - client_info=self._client_info, - ) - - request = bigtable_pb2.ReadModifyWriteRowRequest( - table_name=table_name, - row_key=row_key, - rules=rules, - app_profile_id=app_profile_id, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("table_name", table_name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["read_modify_write_row"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) diff --git a/google/cloud/bigtable_v2/gapic/bigtable_client_config.py b/google/cloud/bigtable_v2/gapic/bigtable_client_config.py deleted file mode 100644 index 8a57847bf..000000000 --- a/google/cloud/bigtable_v2/gapic/bigtable_client_config.py +++ /dev/null @@ -1,80 +0,0 @@ -config = { - "interfaces": { - "google.bigtable.v2.Bigtable": { - "retry_codes": { - "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], - "non_idempotent": [], - }, - "retry_params": { - "idempotent_params": { - "initial_retry_delay_millis": 10, - "retry_delay_multiplier": 2.0, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 20000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 20000, - "total_timeout_millis": 600000, - }, - "non_idempotent_params": { - "initial_retry_delay_millis": 10, - "retry_delay_multiplier": 2.0, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 20000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 20000, - "total_timeout_millis": 20000, - }, - "read_rows_params": { - 
"initial_retry_delay_millis": 10, - "retry_delay_multiplier": 2.0, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 300000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 300000, - "total_timeout_millis": 43200000, - }, - "mutate_rows_params": { - "initial_retry_delay_millis": 10, - "retry_delay_multiplier": 2.0, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 60000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 60000, - "total_timeout_millis": 600000, - }, - }, - "methods": { - "ReadRows": { - "timeout_millis": 43200000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "read_rows_params", - }, - "SampleRowKeys": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - "MutateRow": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "MutateRows": { - "timeout_millis": 600000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "mutate_rows_params", - }, - "CheckAndMutateRow": { - "timeout_millis": 20000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - "ReadModifyWriteRow": { - "timeout_millis": 20000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - }, - } - } -} diff --git a/google/cloud/bigtable_v2/gapic/transports/__init__.py b/google/cloud/bigtable_v2/gapic/transports/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py b/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py deleted file mode 100644 index 5b2757db2..000000000 --- a/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py +++ /dev/null @@ -1,207 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import google.api_core.grpc_helpers - -from google.cloud.bigtable_v2.proto import bigtable_pb2_grpc - - -class BigtableGrpcTransport(object): - """gRPC transport class providing stubs for - google.bigtable.v2 Bigtable API. - - The transport provides access to the raw gRPC stubs, - which can be used to take advantage of advanced - features of gRPC. - """ - - # The scopes needed to make gRPC calls to all of the methods defined - # in this service. - _OAUTH_SCOPES = ( - "https://www.googleapis.com/auth/bigtable.data", - "https://www.googleapis.com/auth/bigtable.data.readonly", - "https://www.googleapis.com/auth/cloud-bigtable.data", - "https://www.googleapis.com/auth/cloud-bigtable.data.readonly", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - ) - - def __init__( - self, channel=None, credentials=None, address="bigtable.googleapis.com:443" - ): - """Instantiate the transport class. - - Args: - channel (grpc.Channel): A ``Channel`` instance through - which to make calls. 
This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - address (str): The address where the service is hosted. - """ - # If both `channel` and `credentials` are specified, raise an - # exception (channels come with credentials baked in already). - if channel is not None and credentials is not None: - raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive.", - ) - - # Create the channel. - if channel is None: - channel = self.create_channel( - address=address, - credentials=credentials, - options={ - "grpc.max_send_message_length": -1, - "grpc.max_receive_message_length": -1, - }.items(), - ) - - self._channel = channel - - # gRPC uses objects called "stubs" that are bound to the - # channel and provide a basic method for each RPC. - self._stubs = { - "bigtable_stub": bigtable_pb2_grpc.BigtableStub(channel), - } - - @classmethod - def create_channel( - cls, address="bigtable.googleapis.com:443", credentials=None, **kwargs - ): - """Create and return a gRPC channel object. - - Args: - address (str): The host for the channel to use. - credentials (~.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - kwargs (dict): Keyword arguments, which are passed to the - channel creation. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return google.api_core.grpc_helpers.create_channel( - address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs - ) - - @property - def channel(self): - """The gRPC channel used by the transport. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return self._channel - - @property - def read_rows(self): - """Return the gRPC stub for :meth:`BigtableClient.read_rows`. - - Streams back the contents of all requested rows in key order, optionally - applying the same Reader filter to each. Depending on their size, - rows and cells may be broken up across multiple responses, but - atomicity of each row will still be preserved. See the - ReadRowsResponse documentation for details. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_stub"].ReadRows - - @property - def sample_row_keys(self): - """Return the gRPC stub for :meth:`BigtableClient.sample_row_keys`. - - Returns a sample of row keys in the table. The returned row keys will - delimit contiguous sections of the table of approximately equal size, - which can be used to break up the data for distributed tasks like - mapreduces. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_stub"].SampleRowKeys - - @property - def mutate_row(self): - """Return the gRPC stub for :meth:`BigtableClient.mutate_row`. - - Mutates a row atomically. Cells already present in the row are left - unchanged unless explicitly changed by ``mutation``. 
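# Illustrative note (not part of the patch): the stub-per-property pattern of
# this deleted transport carries over to the new modules under
# services/bigtable/transports/, where each RPC is still reachable as a raw
# gRPC callable. Sketch assuming default credentials are available.
from google.cloud.bigtable_v2.services.bigtable.transports import (
    BigtableGrpcTransport,
)

transport = BigtableGrpcTransport()
raw_mutate_row = transport.mutate_row  # the bound gRPC callable for MutateRow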
- - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_stub"].MutateRow - - @property - def mutate_rows(self): - """Return the gRPC stub for :meth:`BigtableClient.mutate_rows`. - - Mutates multiple rows in a batch. Each individual row is mutated - atomically as in MutateRow, but the entire batch is not executed - atomically. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_stub"].MutateRows - - @property - def check_and_mutate_row(self): - """Return the gRPC stub for :meth:`BigtableClient.check_and_mutate_row`. - - Mutates a row atomically based on the output of a predicate Reader filter. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_stub"].CheckAndMutateRow - - @property - def read_modify_write_row(self): - """Return the gRPC stub for :meth:`BigtableClient.read_modify_write_row`. - - Modifies a row atomically on the server. The method reads the latest - existing timestamp and value from the specified columns and writes a new - entry based on pre-defined read/modify/write rules. The new value for the - timestamp is the greater of the existing timestamp or the current server - time. The method returns the new contents of all modified cells. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_stub"].ReadModifyWriteRow diff --git a/google/cloud/bigtable_v2/proto/__init__.py b/google/cloud/bigtable_v2/proto/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/google/cloud/bigtable_v2/proto/bigtable_cluster_data.proto b/google/cloud/bigtable_v2/proto/bigtable_cluster_data.proto deleted file mode 100644 index ca3b663d8..000000000 --- a/google/cloud/bigtable_v2/proto/bigtable_cluster_data.proto +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2017 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.cluster.v1; - -import "google/api/annotations.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/timestamp.proto"; - -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/cluster/v1;cluster"; -option java_multiple_files = true; -option java_outer_classname = "BigtableClusterDataProto"; -option java_package = "com.google.bigtable.admin.cluster.v1"; - - -// A physical location in which a particular project can allocate Cloud BigTable -// resources. -message Zone { - // Possible states of a zone. - enum Status { - // The state of the zone is unknown or unspecified. - UNKNOWN = 0; - - // The zone is in a good state. 
- OK = 1; - - // The zone is down for planned maintenance. - PLANNED_MAINTENANCE = 2; - - // The zone is down for emergency or unplanned maintenance. - EMERGENCY_MAINENANCE = 3; - } - - // A permanent unique identifier for the zone. - // Values are of the form projects//zones/[a-z][-a-z0-9]* - string name = 1; - - // The name of this zone as it appears in UIs. - string display_name = 2; - - // The current state of this zone. - Status status = 3; -} - -// An isolated set of Cloud BigTable resources on which tables can be hosted. -message Cluster { - // A permanent unique identifier for the cluster. For technical reasons, the - // zone in which the cluster resides is included here. - // Values are of the form - // projects//zones//clusters/[a-z][-a-z0-9]* - string name = 1; - - // The operation currently running on the cluster, if any. - // This cannot be set directly, only through CreateCluster, UpdateCluster, - // or UndeleteCluster. Calls to these methods will be rejected if - // "current_operation" is already set. - google.longrunning.Operation current_operation = 3; - - // The descriptive name for this cluster as it appears in UIs. - // Must be unique per zone. - string display_name = 4; - - // The number of serve nodes allocated to this cluster. - int32 serve_nodes = 5; - - // What storage type to use for tables in this cluster. Only configurable at - // cluster creation time. If unspecified, STORAGE_SSD will be used. - StorageType default_storage_type = 8; -} - -enum StorageType { - // The storage type used is unspecified. - STORAGE_UNSPECIFIED = 0; - - // Data will be stored in SSD, providing low and consistent latencies. - STORAGE_SSD = 1; - - // Data will be stored in HDD, providing high and less predictable - // latencies. - STORAGE_HDD = 2; -} diff --git a/google/cloud/bigtable_v2/proto/bigtable_cluster_service.proto b/google/cloud/bigtable_v2/proto/bigtable_cluster_service.proto deleted file mode 100644 index 038fcc463..000000000 --- a/google/cloud/bigtable_v2/proto/bigtable_cluster_service.proto +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2017 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.cluster.v1; - -import "google/api/annotations.proto"; -import "google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto"; -import "google/bigtable/admin/cluster/v1/bigtable_cluster_service_messages.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/empty.proto"; - -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/cluster/v1;cluster"; -option java_multiple_files = true; -option java_outer_classname = "BigtableClusterServicesProto"; -option java_package = "com.google.bigtable.admin.cluster.v1"; - - -// Service for managing zonal Cloud Bigtable resources. -service BigtableClusterService { - // Lists the supported zones for the given project. 
- rpc ListZones(ListZonesRequest) returns (ListZonesResponse) { - option (google.api.http) = { get: "/v1/{name=projects/*}/zones" }; - } - - // Gets information about a particular cluster. - rpc GetCluster(GetClusterRequest) returns (Cluster) { - option (google.api.http) = { get: "/v1/{name=projects/*/zones/*/clusters/*}" }; - } - - // Lists all clusters in the given project, along with any zones for which - // cluster information could not be retrieved. - rpc ListClusters(ListClustersRequest) returns (ListClustersResponse) { - option (google.api.http) = { get: "/v1/{name=projects/*}/aggregated/clusters" }; - } - - // Creates a cluster and begins preparing it to begin serving. The returned - // cluster embeds as its "current_operation" a long-running operation which - // can be used to track the progress of turning up the new cluster. - // Immediately upon completion of this request: - // * The cluster will be readable via the API, with all requested attributes - // but no allocated resources. - // Until completion of the embedded operation: - // * Cancelling the operation will render the cluster immediately unreadable - // via the API. - // * All other attempts to modify or delete the cluster will be rejected. - // Upon completion of the embedded operation: - // * Billing for all successfully-allocated resources will begin (some types - // may have lower than the requested levels). - // * New tables can be created in the cluster. - // * The cluster's allocated resource levels will be readable via the API. - // The embedded operation's "metadata" field type is - // [CreateClusterMetadata][google.bigtable.admin.cluster.v1.CreateClusterMetadata] The embedded operation's "response" field type is - // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. - rpc CreateCluster(CreateClusterRequest) returns (Cluster) { - option (google.api.http) = { post: "/v1/{name=projects/*/zones/*}/clusters" body: "*" }; - } - - // Updates a cluster, and begins allocating or releasing resources as - // requested. The returned cluster embeds as its "current_operation" a - // long-running operation which can be used to track the progress of updating - // the cluster. - // Immediately upon completion of this request: - // * For resource types where a decrease in the cluster's allocation has been - // requested, billing will be based on the newly-requested level. - // Until completion of the embedded operation: - // * Cancelling the operation will set its metadata's "cancelled_at_time", - // and begin restoring resources to their pre-request values. The operation - // is guaranteed to succeed at undoing all resource changes, after which - // point it will terminate with a CANCELLED status. - // * All other attempts to modify or delete the cluster will be rejected. - // * Reading the cluster via the API will continue to give the pre-request - // resource levels. - // Upon completion of the embedded operation: - // * Billing will begin for all successfully-allocated resources (some types - // may have lower than the requested levels). - // * All newly-reserved resources will be available for serving the cluster's - // tables. - // * The cluster's new resource levels will be readable via the API. - // [UpdateClusterMetadata][google.bigtable.admin.cluster.v1.UpdateClusterMetadata] The embedded operation's "response" field type is - // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. 
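These legacy v1 RPCs embed their long-running operation in the returned Cluster's "current_operation"; the handwritten v2 surface returns a `google.api_core.operation.Operation` instead. A sketch of waiting for cluster creation, assuming the instance already exists and using placeholder IDs and location:

from google.cloud import bigtable

client = bigtable.Client(project="my-project", admin=True)
instance = client.instance("my-instance")     # assumed to exist already
cluster = instance.cluster(
    "my-cluster", location_id="us-central1-f", serve_nodes=3
)

operation = cluster.create()    # returns google.api_core.operation.Operation
operation.result(timeout=300)   # block until the cluster is serving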
-  rpc UpdateCluster(Cluster) returns (Cluster) {
-    option (google.api.http) = { put: "/v1/{name=projects/*/zones/*/clusters/*}" body: "*" };
-  }
-
-  // Marks a cluster and all of its tables for permanent deletion in 7 days.
-  // Immediately upon completion of the request:
-  //  * Billing will cease for all of the cluster's reserved resources.
-  //  * The cluster's "delete_time" field will be set 7 days in the future.
-  // Soon afterward:
-  //  * All tables within the cluster will become unavailable.
-  // Prior to the cluster's "delete_time":
-  //  * The cluster can be recovered with a call to UndeleteCluster.
-  //  * All other attempts to modify or delete the cluster will be rejected.
-  // At the cluster's "delete_time":
-  //  * The cluster and *all of its tables* will immediately and irrevocably
-  //    disappear from the API, and their data will be permanently deleted.
-  rpc DeleteCluster(DeleteClusterRequest) returns (google.protobuf.Empty) {
-    option (google.api.http) = { delete: "/v1/{name=projects/*/zones/*/clusters/*}" };
-  }
-
-  // Cancels the scheduled deletion of a cluster and begins preparing it to
-  // resume serving. The returned operation will also be embedded as the
-  // cluster's "current_operation".
-  // Immediately upon completion of this request:
-  //  * The cluster's "delete_time" field will be unset, protecting it from
-  //    automatic deletion.
-  // Until completion of the returned operation:
-  //  * The operation cannot be cancelled.
-  // Upon completion of the returned operation:
-  //  * Billing for the cluster's resources will resume.
-  //  * All tables within the cluster will be available.
-  // The embedded operation's "metadata" field type is
-  // [UndeleteClusterMetadata][google.bigtable.admin.cluster.v1.UndeleteClusterMetadata] The embedded operation's "response" field type is
-  // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful.
-  rpc UndeleteCluster(UndeleteClusterRequest) returns (google.longrunning.Operation) {
-    option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*}:undelete" body: "" };
-  }
-}
diff --git a/google/cloud/bigtable_v2/proto/bigtable_cluster_service_messages.proto b/google/cloud/bigtable_v2/proto/bigtable_cluster_service_messages.proto
deleted file mode 100644
index 518d14dac..000000000
--- a/google/cloud/bigtable_v2/proto/bigtable_cluster_service_messages.proto
+++ /dev/null
@@ -1,141 +0,0 @@
-// Copyright 2017 Google Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.bigtable.admin.cluster.v1;
-
-import "google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto";
-import "google/protobuf/timestamp.proto";
-
-option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/cluster/v1;cluster";
-option java_multiple_files = true;
-option java_outer_classname = "BigtableClusterServiceMessagesProto";
-option java_package = "com.google.bigtable.admin.cluster.v1";
-
-
-// Request message for BigtableClusterService.ListZones.
-message ListZonesRequest { - // The unique name of the project for which a list of supported zones is - // requested. - // Values are of the form projects/ - string name = 1; -} - -// Response message for BigtableClusterService.ListZones. -message ListZonesResponse { - // The list of requested zones. - repeated Zone zones = 1; -} - -// Request message for BigtableClusterService.GetCluster. -message GetClusterRequest { - // The unique name of the requested cluster. - // Values are of the form projects//zones//clusters/ - string name = 1; -} - -// Request message for BigtableClusterService.ListClusters. -message ListClustersRequest { - // The unique name of the project for which a list of clusters is requested. - // Values are of the form projects/ - string name = 1; -} - -// Response message for BigtableClusterService.ListClusters. -message ListClustersResponse { - // The list of requested Clusters. - repeated Cluster clusters = 1; - - // The zones for which clusters could not be retrieved. - repeated Zone failed_zones = 2; -} - -// Request message for BigtableClusterService.CreateCluster. -message CreateClusterRequest { - // The unique name of the zone in which to create the cluster. - // Values are of the form projects//zones/ - string name = 1; - - // The id to be used when referring to the new cluster within its zone, - // e.g. just the "test-cluster" section of the full name - // "projects//zones//clusters/test-cluster". - string cluster_id = 2; - - // The cluster to create. - // The "name", "delete_time", and "current_operation" fields must be left - // blank. - Cluster cluster = 3; -} - -// Metadata type for the operation returned by -// BigtableClusterService.CreateCluster. -message CreateClusterMetadata { - // The request which prompted the creation of this operation. - CreateClusterRequest original_request = 1; - - // The time at which original_request was received. - google.protobuf.Timestamp request_time = 2; - - // The time at which this operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 3; -} - -// Metadata type for the operation returned by -// BigtableClusterService.UpdateCluster. -message UpdateClusterMetadata { - // The request which prompted the creation of this operation. - Cluster original_request = 1; - - // The time at which original_request was received. - google.protobuf.Timestamp request_time = 2; - - // The time at which this operation was cancelled. If set, this operation is - // in the process of undoing itself (which is guaranteed to succeed) and - // cannot be cancelled again. - google.protobuf.Timestamp cancel_time = 3; - - // The time at which this operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 4; -} - -// Request message for BigtableClusterService.DeleteCluster. -message DeleteClusterRequest { - // The unique name of the cluster to be deleted. - // Values are of the form projects//zones//clusters/ - string name = 1; -} - -// Request message for BigtableClusterService.UndeleteCluster. -message UndeleteClusterRequest { - // The unique name of the cluster to be un-deleted. - // Values are of the form projects//zones//clusters/ - string name = 1; -} - -// Metadata type for the operation returned by -// BigtableClusterService.UndeleteCluster. -message UndeleteClusterMetadata { - // The time at which the original request was received. - google.protobuf.Timestamp request_time = 1; - - // The time at which this operation failed or was completed successfully. 
- google.protobuf.Timestamp finish_time = 2; -} - -// Metadata type for operations initiated by the V2 BigtableAdmin service. -// More complete information for such operations is available via the V2 API. -message V2OperationMetadata { - -} diff --git a/google/cloud/bigtable_v2/proto/bigtable_data.proto b/google/cloud/bigtable_v2/proto/bigtable_data.proto deleted file mode 100644 index bd063a925..000000000 --- a/google/cloud/bigtable_v2/proto/bigtable_data.proto +++ /dev/null @@ -1,516 +0,0 @@ -// Copyright 2018 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.v1; - -option go_package = "google.golang.org/genproto/googleapis/bigtable/v1;bigtable"; -option java_multiple_files = true; -option java_outer_classname = "BigtableDataProto"; -option java_package = "com.google.bigtable.v1"; - - -// Specifies the complete (requested) contents of a single row of a table. -// Rows which exceed 256MiB in size cannot be read in full. -message Row { - // The unique key which identifies this row within its table. This is the same - // key that's used to identify the row in, for example, a MutateRowRequest. - // May contain any non-empty byte string up to 4KiB in length. - bytes key = 1; - - // May be empty, but only if the entire row is empty. - // The mutual ordering of column families is not specified. - repeated Family families = 2; -} - -// Specifies (some of) the contents of a single row/column family of a table. -message Family { - // The unique key which identifies this family within its row. This is the - // same key that's used to identify the family in, for example, a RowFilter - // which sets its "family_name_regex_filter" field. - // Must match [-_.a-zA-Z0-9]+, except that AggregatingRowProcessors may - // produce cells in a sentinel family with an empty name. - // Must be no greater than 64 characters in length. - string name = 1; - - // Must not be empty. Sorted in order of increasing "qualifier". - repeated Column columns = 2; -} - -// Specifies (some of) the contents of a single row/column of a table. -message Column { - // The unique key which identifies this column within its family. This is the - // same key that's used to identify the column in, for example, a RowFilter - // which sets its "column_qualifier_regex_filter" field. - // May contain any byte string, including the empty string, up to 16kiB in - // length. - bytes qualifier = 1; - - // Must not be empty. Sorted in order of decreasing "timestamp_micros". - repeated Cell cells = 2; -} - -// Specifies (some of) the contents of a single row/column/timestamp of a table. -message Cell { - // The cell's stored timestamp, which also uniquely identifies it within - // its column. - // Values are always expressed in microseconds, but individual tables may set - // a coarser "granularity" to further restrict the allowed values. For - // example, a table which specifies millisecond granularity will only allow - // values of "timestamp_micros" which are multiples of 1000. 
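The Row → Family → Column → Cell nesting described above is exposed by the handwritten client as the `cells` mapping of a `PartialRowData`. A sketch of walking it, with placeholder names; per the Column comment, cells arrive sorted by decreasing timestamp:

from google.cloud import bigtable

client = bigtable.Client(project="my-project")
table = client.instance("my-instance").table("my-table")

row = table.read_row(b"row-key")             # None if the row does not exist
if row is not None:
    for family, columns in row.cells.items():
        for qualifier, cells in columns.items():
            for cell in cells:               # newest timestamp first
                print(family, qualifier, cell.value, cell.timestamp)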
-  int64 timestamp_micros = 1;
-
-  // The value stored in the cell.
-  // May contain any byte string, including the empty string, up to 100MiB in
-  // length.
-  bytes value = 2;
-
-  // Labels applied to the cell by a [RowFilter][google.bigtable.v1.RowFilter].
-  repeated string labels = 3;
-}
-
-// Specifies a contiguous range of rows.
-message RowRange {
-  // Inclusive lower bound. If left empty, interpreted as the empty string.
-  bytes start_key = 2;
-
-  // Exclusive upper bound. If left empty, interpreted as infinity.
-  bytes end_key = 3;
-}
-
-// Specifies a non-contiguous set of rows.
-message RowSet {
-  // Single rows included in the set.
-  repeated bytes row_keys = 1;
-
-  // Contiguous row ranges included in the set.
-  repeated RowRange row_ranges = 2;
-}
-
-// Specifies a contiguous range of columns within a single column family.
-// The range spans from : to
-// :, where both bounds can be either inclusive or
-// exclusive.
-message ColumnRange {
-  // The name of the column family within which this range falls.
-  string family_name = 1;
-
-  // The column qualifier at which to start the range (within 'column_family').
-  // If neither field is set, interpreted as the empty string, inclusive.
-  oneof start_qualifier {
-    // Used when giving an inclusive lower bound for the range.
-    bytes start_qualifier_inclusive = 2;
-
-    // Used when giving an exclusive lower bound for the range.
-    bytes start_qualifier_exclusive = 3;
-  }
-
-  // The column qualifier at which to end the range (within 'column_family').
-  // If neither field is set, interpreted as the infinite string, exclusive.
-  oneof end_qualifier {
-    // Used when giving an inclusive upper bound for the range.
-    bytes end_qualifier_inclusive = 4;
-
-    // Used when giving an exclusive upper bound for the range.
-    bytes end_qualifier_exclusive = 5;
-  }
-}
-
-// Specifies a contiguous range of microsecond timestamps.
-message TimestampRange {
-  // Inclusive lower bound. If left empty, interpreted as 0.
-  int64 start_timestamp_micros = 1;
-
-  // Exclusive upper bound. If left empty, interpreted as infinity.
-  int64 end_timestamp_micros = 2;
-}
-
-// Specifies a contiguous range of raw byte values.
-message ValueRange {
-  // The value at which to start the range.
-  // If neither field is set, interpreted as the empty string, inclusive.
-  oneof start_value {
-    // Used when giving an inclusive lower bound for the range.
-    bytes start_value_inclusive = 1;
-
-    // Used when giving an exclusive lower bound for the range.
-    bytes start_value_exclusive = 2;
-  }
-
-  // The value at which to end the range.
-  // If neither field is set, interpreted as the infinite string, exclusive.
-  oneof end_value {
-    // Used when giving an inclusive upper bound for the range.
-    bytes end_value_inclusive = 3;
-
-    // Used when giving an exclusive upper bound for the range.
-    bytes end_value_exclusive = 4;
-  }
-}
-
-// Takes a row as input and produces an alternate view of the row based on
-// specified rules. For example, a RowFilter might trim down a row to include
-// just the cells from columns matching a given regular expression, or might
-// return all the cells of a row but not their values. More complicated filters
-// can be composed out of these components to express requests such as, "within
-// every column of a particular family, give just the two most recent cells
-// which are older than timestamp X."
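The RowRange/RowSet messages above correspond to `google.cloud.bigtable.row_set` in the handwritten layer, with the same inclusive-start, exclusive-end defaults. A sketch with placeholder keys:

from google.cloud import bigtable
from google.cloud.bigtable.row_set import RowSet

client = bigtable.Client(project="my-project")
table = client.instance("my-instance").table("my-table")

row_set = RowSet()
row_set.add_row_key(b"exact-row")
row_set.add_row_range_from_keys(start_key=b"a", end_key=b"m")  # [a, m)

for row in table.read_rows(row_set=row_set):
    print(row.row_key)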
-// -// There are two broad categories of RowFilters (true filters and transformers), -// as well as two ways to compose simple filters into more complex ones -// (chains and interleaves). They work as follows: -// -// * True filters alter the input row by excluding some of its cells wholesale -// from the output row. An example of a true filter is the "value_regex_filter", -// which excludes cells whose values don't match the specified pattern. All -// regex true filters use RE2 syntax (https://github.com/google/re2/wiki/Syntax) -// in raw byte mode (RE2::Latin1), and are evaluated as full matches. An -// important point to keep in mind is that RE2(.) is equivalent by default to -// RE2([^\n]), meaning that it does not match newlines. When attempting to match -// an arbitrary byte, you should therefore use the escape sequence '\C', which -// may need to be further escaped as '\\C' in your client language. -// -// * Transformers alter the input row by changing the values of some of its -// cells in the output, without excluding them completely. Currently, the only -// supported transformer is the "strip_value_transformer", which replaces every -// cell's value with the empty string. -// -// * Chains and interleaves are described in more detail in the -// RowFilter.Chain and RowFilter.Interleave documentation. -// -// The total serialized size of a RowFilter message must not -// exceed 4096 bytes, and RowFilters may not be nested within each other -// (in Chains or Interleaves) to a depth of more than 20. -message RowFilter { - // A RowFilter which sends rows through several RowFilters in sequence. - message Chain { - // The elements of "filters" are chained together to process the input row: - // in row -> f(0) -> intermediate row -> f(1) -> ... -> f(N) -> out row - // The full chain is executed atomically. - repeated RowFilter filters = 1; - } - - // A RowFilter which sends each row to each of several component - // RowFilters and interleaves the results. - message Interleave { - // The elements of "filters" all process a copy of the input row, and the - // results are pooled, sorted, and combined into a single output row. - // If multiple cells are produced with the same column and timestamp, - // they will all appear in the output row in an unspecified mutual order. - // Consider the following example, with three filters: - // - // input row - // | - // ----------------------------------------------------- - // | | | - // f(0) f(1) f(2) - // | | | - // 1: foo,bar,10,x foo,bar,10,z far,bar,7,a - // 2: foo,blah,11,z far,blah,5,x far,blah,5,x - // | | | - // ----------------------------------------------------- - // | - // 1: foo,bar,10,z // could have switched with #2 - // 2: foo,bar,10,x // could have switched with #1 - // 3: foo,blah,11,z - // 4: far,bar,7,a - // 5: far,blah,5,x // identical to #6 - // 6: far,blah,5,x // identical to #5 - // All interleaved filters are executed atomically. - repeated RowFilter filters = 1; - } - - // A RowFilter which evaluates one of two possible RowFilters, depending on - // whether or not a predicate RowFilter outputs any cells from the input row. - // - // IMPORTANT NOTE: The predicate filter does not execute atomically with the - // true and false filters, which may lead to inconsistent or unexpected - // results. Additionally, Condition filters have poor performance, especially - // when filters are set for the false condition. 
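The Chain, Interleave, and Condition forms described above map onto `RowFilterChain`, `RowFilterUnion`, and `ConditionalRowFilter` in the handwritten `row_filters` module; the non-atomicity caveat on Condition applies there as well. A sketch with placeholder families and patterns:

from google.cloud.bigtable import row_filters

chain = row_filters.RowFilterChain(filters=[
    row_filters.FamilyNameRegexFilter("cf1"),
    row_filters.CellsColumnLimitFilter(1),    # keep the newest cell per column
])
interleave = row_filters.RowFilterUnion(filters=[
    chain,
    row_filters.ColumnQualifierRegexFilter(b"^meta"),
])
conditional = row_filters.ConditionalRowFilter(
    base_filter=row_filters.ValueRegexFilter(b"active"),  # the predicate
    true_filter=interleave,
    false_filter=row_filters.StripValueTransformerFilter(True),
)
# usage: table.read_row(b"row-key", filter_=conditional)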
- message Condition { - // If "predicate_filter" outputs any cells, then "true_filter" will be - // evaluated on the input row. Otherwise, "false_filter" will be evaluated. - RowFilter predicate_filter = 1; - - // The filter to apply to the input row if "predicate_filter" returns any - // results. If not provided, no results will be returned in the true case. - RowFilter true_filter = 2; - - // The filter to apply to the input row if "predicate_filter" does not - // return any results. If not provided, no results will be returned in the - // false case. - RowFilter false_filter = 3; - } - - // Which of the possible RowFilter types to apply. If none are set, this - // RowFilter returns all cells in the input row. - oneof filter { - // Applies several RowFilters to the data in sequence, progressively - // narrowing the results. - Chain chain = 1; - - // Applies several RowFilters to the data in parallel and combines the - // results. - Interleave interleave = 2; - - // Applies one of two possible RowFilters to the data based on the output of - // a predicate RowFilter. - Condition condition = 3; - - // ADVANCED USE ONLY. - // Hook for introspection into the RowFilter. Outputs all cells directly to - // the output of the read rather than to any parent filter. Consider the - // following example: - // - // Chain( - // FamilyRegex("A"), - // Interleave( - // All(), - // Chain(Label("foo"), Sink()) - // ), - // QualifierRegex("B") - // ) - // - // A,A,1,w - // A,B,2,x - // B,B,4,z - // | - // FamilyRegex("A") - // | - // A,A,1,w - // A,B,2,x - // | - // +------------+-------------+ - // | | - // All() Label(foo) - // | | - // A,A,1,w A,A,1,w,labels:[foo] - // A,B,2,x A,B,2,x,labels:[foo] - // | | - // | Sink() --------------+ - // | | | - // +------------+ x------+ A,A,1,w,labels:[foo] - // | A,B,2,x,labels:[foo] - // A,A,1,w | - // A,B,2,x | - // | | - // QualifierRegex("B") | - // | | - // A,B,2,x | - // | | - // +--------------------------------+ - // | - // A,A,1,w,labels:[foo] - // A,B,2,x,labels:[foo] // could be switched - // A,B,2,x // could be switched - // - // Despite being excluded by the qualifier filter, a copy of every cell - // that reaches the sink is present in the final result. - // - // As with an [Interleave][google.bigtable.v1.RowFilter.Interleave], - // duplicate cells are possible, and appear in an unspecified mutual order. - // In this case we have a duplicate with column "A:B" and timestamp 2, - // because one copy passed through the all filter while the other was - // passed through the label and sink. Note that one copy has label "foo", - // while the other does not. - // - // Cannot be used within the `predicate_filter`, `true_filter`, or - // `false_filter` of a [Condition][google.bigtable.v1.RowFilter.Condition]. - bool sink = 16; - - // Matches all cells, regardless of input. Functionally equivalent to - // leaving `filter` unset, but included for completeness. - bool pass_all_filter = 17; - - // Does not match any cells, regardless of input. Useful for temporarily - // disabling just part of a filter. - bool block_all_filter = 18; - - // Matches only cells from rows whose keys satisfy the given RE2 regex. In - // other words, passes through the entire row when the key matches, and - // otherwise produces an empty row. - // Note that, since row keys can contain arbitrary bytes, the '\C' escape - // sequence must be used if a true wildcard is desired. The '.' character - // will not match the new line character '\n', which may be present in a - // binary key. 
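The Sink diagram above can be rebuilt with the handwritten filters, where `PassAllFilter`, `ApplyLabelFilter`, and `SinkFilter` stand in for All(), Label("foo"), and Sink(); the family and qualifier regexes follow the diagram:

from google.cloud.bigtable import row_filters

diagram = row_filters.RowFilterChain(filters=[
    row_filters.FamilyNameRegexFilter("A"),
    row_filters.RowFilterUnion(filters=[
        row_filters.PassAllFilter(True),           # All()
        row_filters.RowFilterChain(filters=[
            row_filters.ApplyLabelFilter("foo"),   # Label("foo")
            row_filters.SinkFilter(True),          # Sink()
        ]),
    ]),
    row_filters.ColumnQualifierRegexFilter(b"B"),
])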
-    bytes row_key_regex_filter = 4;
-
-    // Matches all cells from a row with probability p, and matches no cells
-    // from the row with probability 1-p.
-    double row_sample_filter = 14;
-
-    // Matches only cells from columns whose families satisfy the given RE2
-    // regex. For technical reasons, the regex must not contain the ':'
-    // character, even if it is not being used as a literal.
-    // Note that, since column families cannot contain the new line character
-    // '\n', it is sufficient to use '.' as a full wildcard when matching
-    // column family names.
-    string family_name_regex_filter = 5;
-
-    // Matches only cells from columns whose qualifiers satisfy the given RE2
-    // regex.
-    // Note that, since column qualifiers can contain arbitrary bytes, the '\C'
-    // escape sequence must be used if a true wildcard is desired. The '.'
-    // character will not match the new line character '\n', which may be
-    // present in a binary qualifier.
-    bytes column_qualifier_regex_filter = 6;
-
-    // Matches only cells from columns within the given range.
-    ColumnRange column_range_filter = 7;
-
-    // Matches only cells with timestamps within the given range.
-    TimestampRange timestamp_range_filter = 8;
-
-    // Matches only cells with values that satisfy the given regular expression.
-    // Note that, since cell values can contain arbitrary bytes, the '\C' escape
-    // sequence must be used if a true wildcard is desired. The '.' character
-    // will not match the new line character '\n', which may be present in a
-    // binary value.
-    bytes value_regex_filter = 9;
-
-    // Matches only cells with values that fall within the given range.
-    ValueRange value_range_filter = 15;
-
-    // Skips the first N cells of each row, matching all subsequent cells.
-    // If duplicate cells are present, as is possible when using an Interleave,
-    // each copy of the cell is counted separately.
-    int32 cells_per_row_offset_filter = 10;
-
-    // Matches only the first N cells of each row.
-    // If duplicate cells are present, as is possible when using an Interleave,
-    // each copy of the cell is counted separately.
-    int32 cells_per_row_limit_filter = 11;
-
-    // Matches only the most recent N cells within each column. For example,
-    // if N=2, this filter would match column "foo:bar" at timestamps 10 and 9,
-    // skip all earlier cells in "foo:bar", and then begin matching again in
-    // column "foo:bar2".
-    // If duplicate cells are present, as is possible when using an Interleave,
-    // each copy of the cell is counted separately.
-    int32 cells_per_column_limit_filter = 12;
-
-    // Replaces each cell's value with the empty string.
-    bool strip_value_transformer = 13;
-
-    // Applies the given label to all cells in the output row. This allows
-    // the client to determine which results were produced from which part of
-    // the filter.
-    //
-    // Values must be at most 15 characters in length, and match the RE2
-    // pattern [a-z0-9\\-]+
-    //
-    // Due to a technical limitation, it is not currently possible to apply
-    // multiple labels to a cell. As a result, a Chain may have no more than
-    // one sub-filter which contains an apply_label_transformer. It is okay for
-    // an Interleave to contain multiple apply_label_transformers, as they will
-    // be applied to separate copies of the input. This may be relaxed in the
-    // future.
-    string apply_label_transformer = 19;
-  }
-}
-
-// Specifies a particular change to be made to the contents of a row.
-message Mutation {
-  // A Mutation which sets the value of the specified cell.
- message SetCell { - // The name of the family into which new data should be written. - // Must match [-_.a-zA-Z0-9]+ - string family_name = 1; - - // The qualifier of the column into which new data should be written. - // Can be any byte string, including the empty string. - bytes column_qualifier = 2; - - // The timestamp of the cell into which new data should be written. - // Use -1 for current Bigtable server time. - // Otherwise, the client should set this value itself, noting that the - // default value is a timestamp of zero if the field is left unspecified. - // Values must match the "granularity" of the table (e.g. micros, millis). - int64 timestamp_micros = 3; - - // The value to be written into the specified cell. - bytes value = 4; - } - - // A Mutation which deletes cells from the specified column, optionally - // restricting the deletions to a given timestamp range. - message DeleteFromColumn { - // The name of the family from which cells should be deleted. - // Must match [-_.a-zA-Z0-9]+ - string family_name = 1; - - // The qualifier of the column from which cells should be deleted. - // Can be any byte string, including the empty string. - bytes column_qualifier = 2; - - // The range of timestamps within which cells should be deleted. - TimestampRange time_range = 3; - } - - // A Mutation which deletes all cells from the specified column family. - message DeleteFromFamily { - // The name of the family from which cells should be deleted. - // Must match [-_.a-zA-Z0-9]+ - string family_name = 1; - } - - // A Mutation which deletes all cells from the containing row. - message DeleteFromRow { - - } - - // Which of the possible Mutation types to apply. - oneof mutation { - // Set a cell's value. - SetCell set_cell = 1; - - // Deletes cells from a column. - DeleteFromColumn delete_from_column = 2; - - // Deletes cells from a column family. - DeleteFromFamily delete_from_family = 3; - - // Deletes cells from the entire row. - DeleteFromRow delete_from_row = 4; - } -} - -// Specifies an atomic read/modify/write operation on the latest value of the -// specified column. -message ReadModifyWriteRule { - // The name of the family to which the read/modify/write should be applied. - // Must match [-_.a-zA-Z0-9]+ - string family_name = 1; - - // The qualifier of the column to which the read/modify/write should be - // applied. - // Can be any byte string, including the empty string. - bytes column_qualifier = 2; - - // The rule used to determine the column's new latest value from its current - // latest value. - oneof rule { - // Rule specifying that "append_value" be appended to the existing value. - // If the targeted cell is unset, it will be treated as containing the - // empty string. - bytes append_value = 3; - - // Rule specifying that "increment_amount" be added to the existing value. - // If the targeted cell is unset, it will be treated as containing a zero. - // Otherwise, the targeted cell must contain an 8-byte value (interpreted - // as a 64-bit big-endian signed integer), or the entire request will fail. - int64 increment_amount = 4; - } -} diff --git a/google/cloud/bigtable_v2/proto/bigtable_instance_admin.proto b/google/cloud/bigtable_v2/proto/bigtable_instance_admin.proto deleted file mode 100644 index ec992ea0f..000000000 --- a/google/cloud/bigtable_v2/proto/bigtable_instance_admin.proto +++ /dev/null @@ -1,456 +0,0 @@ -// Copyright 2018 Google LLC. 
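The Mutation and ReadModifyWriteRule messages above correspond to the handwritten `DirectRow` and `AppendRow` types. A sketch with placeholder names; `set_cell` without a timestamp maps to the -1 server-time convention:

from google.cloud import bigtable

client = bigtable.Client(project="my-project")
table = client.instance("my-instance").table("my-table")

direct = table.direct_row(b"row-1")
direct.set_cell("cf1", b"col", b"value")   # SetCell; timestamp=None -> server time
direct.delete_cell("cf1", b"old-col")      # DeleteFromColumn
direct.commit()

append = table.append_row(b"row-1")
append.append_cell_value("cf1", b"col", b"-suffix")   # append_value rule
append.increment_cell_value("cf1", b"counter", 1)     # increment_amount rule
result = append.commit()   # new contents of the modified cells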
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.bigtable.admin.v2; - -import "google/api/annotations.proto"; -import "google/bigtable/admin/v2/instance.proto"; -import "google/iam/v1/iam_policy.proto"; -import "google/iam/v1/policy.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/empty.proto"; -import "google/protobuf/field_mask.proto"; -import "google/protobuf/timestamp.proto"; - -option csharp_namespace = "Google.Cloud.Bigtable.Admin.V2"; -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin"; -option java_multiple_files = true; -option java_outer_classname = "BigtableInstanceAdminProto"; -option java_package = "com.google.bigtable.admin.v2"; -option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; - - -// Service for creating, configuring, and deleting Cloud Bigtable Instances and -// Clusters. Provides access to the Instance and Cluster schemas only, not the -// tables' metadata or data stored in those tables. -service BigtableInstanceAdmin { - // Create an instance within a project. - rpc CreateInstance(CreateInstanceRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v2/{parent=projects/*}/instances" - body: "*" - }; - } - - // Gets information about an instance. - rpc GetInstance(GetInstanceRequest) returns (Instance) { - option (google.api.http) = { - get: "/v2/{name=projects/*/instances/*}" - }; - } - - // Lists information about instances in a project. - rpc ListInstances(ListInstancesRequest) returns (ListInstancesResponse) { - option (google.api.http) = { - get: "/v2/{parent=projects/*}/instances" - }; - } - - // Updates an instance within a project. - rpc UpdateInstance(Instance) returns (Instance) { - option (google.api.http) = { - put: "/v2/{name=projects/*/instances/*}" - body: "*" - }; - } - - // Partially updates an instance within a project. - rpc PartialUpdateInstance(PartialUpdateInstanceRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - patch: "/v2/{instance.name=projects/*/instances/*}" - body: "instance" - }; - } - - // Delete an instance from a project. - rpc DeleteInstance(DeleteInstanceRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v2/{name=projects/*/instances/*}" - }; - } - - // Creates a cluster within an instance. - rpc CreateCluster(CreateClusterRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v2/{parent=projects/*/instances/*}/clusters" - body: "cluster" - }; - } - - // Gets information about a cluster. - rpc GetCluster(GetClusterRequest) returns (Cluster) { - option (google.api.http) = { - get: "/v2/{name=projects/*/instances/*/clusters/*}" - }; - } - - // Lists information about clusters in an instance. 
- rpc ListClusters(ListClustersRequest) returns (ListClustersResponse) { - option (google.api.http) = { - get: "/v2/{parent=projects/*/instances/*}/clusters" - }; - } - - // Updates a cluster within an instance. - rpc UpdateCluster(Cluster) returns (google.longrunning.Operation) { - option (google.api.http) = { - put: "/v2/{name=projects/*/instances/*/clusters/*}" - body: "*" - }; - } - - // Deletes a cluster from an instance. - rpc DeleteCluster(DeleteClusterRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v2/{name=projects/*/instances/*/clusters/*}" - }; - } - - // Creates an app profile within an instance. - rpc CreateAppProfile(CreateAppProfileRequest) returns (AppProfile) { - option (google.api.http) = { - post: "/v2/{parent=projects/*/instances/*}/appProfiles" - body: "app_profile" - }; - } - - // Gets information about an app profile. - rpc GetAppProfile(GetAppProfileRequest) returns (AppProfile) { - option (google.api.http) = { - get: "/v2/{name=projects/*/instances/*/appProfiles/*}" - }; - } - - // Lists information about app profiles in an instance. - rpc ListAppProfiles(ListAppProfilesRequest) returns (ListAppProfilesResponse) { - option (google.api.http) = { - get: "/v2/{parent=projects/*/instances/*}/appProfiles" - }; - } - - // Updates an app profile within an instance. - rpc UpdateAppProfile(UpdateAppProfileRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - patch: "/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}" - body: "app_profile" - }; - } - - // Deletes an app profile from an instance. - rpc DeleteAppProfile(DeleteAppProfileRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v2/{name=projects/*/instances/*/appProfiles/*}" - }; - } - - // Gets the access control policy for an instance resource. Returns an empty - // policy if an instance exists but does not have a policy set. - rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest) returns (google.iam.v1.Policy) { - option (google.api.http) = { - post: "/v2/{resource=projects/*/instances/*}:getIamPolicy" - body: "*" - }; - } - - // Sets the access control policy on an instance resource. Replaces any - // existing policy. - rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest) returns (google.iam.v1.Policy) { - option (google.api.http) = { - post: "/v2/{resource=projects/*/instances/*}:setIamPolicy" - body: "*" - }; - } - - // Returns permissions that the caller has on the specified instance resource. - rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest) returns (google.iam.v1.TestIamPermissionsResponse) { - option (google.api.http) = { - post: "/v2/{resource=projects/*/instances/*}:testIamPermissions" - body: "*" - }; - } -} - -// Request message for BigtableInstanceAdmin.CreateInstance. -message CreateInstanceRequest { - // The unique name of the project in which to create the new instance. - // Values are of the form `projects/`. - string parent = 1; - - // The ID to be used when referring to the new instance within its project, - // e.g., just `myinstance` rather than - // `projects/myproject/instances/myinstance`. - string instance_id = 2; - - // The instance to create. - // Fields marked `OutputOnly` must be left blank. - Instance instance = 3; - - // The clusters to be created within the instance, mapped by desired - // cluster ID, e.g., just `mycluster` rather than - // `projects/myproject/instances/myinstance/clusters/mycluster`. - // Fields marked `OutputOnly` must be left blank. 
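The three IAM RPCs above are exposed on the handwritten `Instance` as `get_iam_policy`, `set_iam_policy`, and `test_iam_permissions`. A sketch; the role, member, and permission strings are placeholders:

from google.cloud import bigtable
from google.cloud.bigtable.policy import Policy

client = bigtable.Client(project="my-project", admin=True)
instance = client.instance("my-instance")

policy = instance.get_iam_policy()
policy["roles/bigtable.admin"] = [Policy.user("admin@example.com")]
instance.set_iam_policy(policy)

permissions = instance.test_iam_permissions(["bigtable.tables.create"])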
-  // Currently, at most two clusters can be specified.
-  map<string, Cluster> clusters = 4;
-}
-
-// Request message for BigtableInstanceAdmin.GetInstance.
-message GetInstanceRequest {
-  // The unique name of the requested instance. Values are of the form
-  // `projects//instances/`.
-  string name = 1;
-}
-
-// Request message for BigtableInstanceAdmin.ListInstances.
-message ListInstancesRequest {
-  // The unique name of the project for which a list of instances is requested.
-  // Values are of the form `projects/`.
-  string parent = 1;
-
-  // DEPRECATED: This field is unused and ignored.
-  string page_token = 2;
-}
-
-// Response message for BigtableInstanceAdmin.ListInstances.
-message ListInstancesResponse {
-  // The list of requested instances.
-  repeated Instance instances = 1;
-
-  // Locations from which Instance information could not be retrieved,
-  // due to an outage or some other transient condition.
-  // Instances whose Clusters are all in one of the failed locations
-  // may be missing from `instances`, and Instances with at least one
-  // Cluster in a failed location may only have partial information returned.
-  // Values are of the form `projects//locations/`
-  repeated string failed_locations = 2;
-
-  // DEPRECATED: This field is unused and ignored.
-  string next_page_token = 3;
-}
-
-// Request message for BigtableInstanceAdmin.PartialUpdateInstance.
-message PartialUpdateInstanceRequest {
-  // The Instance which will (partially) replace the current value.
-  Instance instance = 1;
-
-  // The subset of Instance fields which should be replaced.
-  // Must be explicitly set.
-  google.protobuf.FieldMask update_mask = 2;
-}
-
-// Request message for BigtableInstanceAdmin.DeleteInstance.
-message DeleteInstanceRequest {
-  // The unique name of the instance to be deleted.
-  // Values are of the form `projects//instances/`.
-  string name = 1;
-}
-
-// Request message for BigtableInstanceAdmin.CreateCluster.
-message CreateClusterRequest {
-  // The unique name of the instance in which to create the new cluster.
-  // Values are of the form
-  // `projects//instances/`.
-  string parent = 1;
-
-  // The ID to be used when referring to the new cluster within its instance,
-  // e.g., just `mycluster` rather than
-  // `projects/myproject/instances/myinstance/clusters/mycluster`.
-  string cluster_id = 2;
-
-  // The cluster to be created.
-  // Fields marked `OutputOnly` must be left blank.
-  Cluster cluster = 3;
-}
-
-// Request message for BigtableInstanceAdmin.GetCluster.
-message GetClusterRequest {
-  // The unique name of the requested cluster. Values are of the form
-  // `projects//instances//clusters/`.
-  string name = 1;
-}
-
-// Request message for BigtableInstanceAdmin.ListClusters.
-message ListClustersRequest {
-  // The unique name of the instance for which a list of clusters is requested.
-  // Values are of the form `projects//instances/`.
-  // Use ` = '-'` to list Clusters for all Instances in a project,
-  // e.g., `projects/myproject/instances/-`.
-  string parent = 1;
-
-  // DEPRECATED: This field is unused and ignored.
-  string page_token = 2;
-}
-
-// Response message for BigtableInstanceAdmin.ListClusters.
-message ListClustersResponse {
-  // The list of requested clusters.
-  repeated Cluster clusters = 1;
-
-  // Locations from which Cluster information could not be retrieved,
-  // due to an outage or some other transient condition.
-  // Clusters from these locations may be missing from `clusters`,
-  // or may only have partial information returned.
- // Values are of the form `projects//locations/` - repeated string failed_locations = 2; - - // DEPRECATED: This field is unused and ignored. - string next_page_token = 3; -} - -// Request message for BigtableInstanceAdmin.DeleteCluster. -message DeleteClusterRequest { - // The unique name of the cluster to be deleted. Values are of the form - // `projects//instances//clusters/`. - string name = 1; -} - -// The metadata for the Operation returned by CreateInstance. -message CreateInstanceMetadata { - // The request that prompted the initiation of this CreateInstance operation. - CreateInstanceRequest original_request = 1; - - // The time at which the original request was received. - google.protobuf.Timestamp request_time = 2; - - // The time at which the operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 3; -} - -// The metadata for the Operation returned by UpdateInstance. -message UpdateInstanceMetadata { - // The request that prompted the initiation of this UpdateInstance operation. - PartialUpdateInstanceRequest original_request = 1; - - // The time at which the original request was received. - google.protobuf.Timestamp request_time = 2; - - // The time at which the operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 3; -} - -// The metadata for the Operation returned by CreateCluster. -message CreateClusterMetadata { - // The request that prompted the initiation of this CreateCluster operation. - CreateClusterRequest original_request = 1; - - // The time at which the original request was received. - google.protobuf.Timestamp request_time = 2; - - // The time at which the operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 3; -} - -// The metadata for the Operation returned by UpdateCluster. -message UpdateClusterMetadata { - // The request that prompted the initiation of this UpdateCluster operation. - Cluster original_request = 1; - - // The time at which the original request was received. - google.protobuf.Timestamp request_time = 2; - - // The time at which the operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 3; -} - -// Request message for BigtableInstanceAdmin.CreateAppProfile. -message CreateAppProfileRequest { - // The unique name of the instance in which to create the new app profile. - // Values are of the form - // `projects//instances/`. - string parent = 1; - - // The ID to be used when referring to the new app profile within its - // instance, e.g., just `myprofile` rather than - // `projects/myproject/instances/myinstance/appProfiles/myprofile`. - string app_profile_id = 2; - - // The app profile to be created. - // Fields marked `OutputOnly` will be ignored. - AppProfile app_profile = 3; - - // If true, ignore safety checks when creating the app profile. - bool ignore_warnings = 4; -} - -// Request message for BigtableInstanceAdmin.GetAppProfile. -message GetAppProfileRequest { - // The unique name of the requested app profile. Values are of the form - // `projects//instances//appProfiles/`. - string name = 1; -} - -// Request message for BigtableInstanceAdmin.ListAppProfiles. -message ListAppProfilesRequest { - // The unique name of the instance for which a list of app profiles is - // requested. Values are of the form - // `projects//instances/`. - // Use ` = '-'` to list AppProfiles for all Instances in a project, - // e.g., `projects/myproject/instances/-`. 
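The app profile messages above correspond to `Instance.app_profile` and the `AppProfile` helper in the handwritten client; `ignore_warnings=True` mirrors the request field of the same name. A sketch with placeholder IDs:

from google.cloud import bigtable
from google.cloud.bigtable import enums

client = bigtable.Client(project="my-project", admin=True)
instance = client.instance("my-instance")

app_profile = instance.app_profile(
    "my-profile",
    routing_policy_type=enums.RoutingPolicyType.ANY,
    description="multi-cluster routing",
)
app_profile.create(ignore_warnings=True)

for profile in instance.list_app_profiles():
    print(profile.app_profile_id)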
- string parent = 1; - - // Maximum number of results per page. - // CURRENTLY UNIMPLEMENTED AND IGNORED. - int32 page_size = 3; - - // The value of `next_page_token` returned by a previous call. - string page_token = 2; -} - -// Response message for BigtableInstanceAdmin.ListAppProfiles. -message ListAppProfilesResponse { - // The list of requested app profiles. - repeated AppProfile app_profiles = 1; - - // Set if not all app profiles could be returned in a single response. - // Pass this value to `page_token` in another request to get the next - // page of results. - string next_page_token = 2; - - // Locations from which AppProfile information could not be retrieved, - // due to an outage or some other transient condition. - // AppProfiles from these locations may be missing from `app_profiles`. - // Values are of the form `projects//locations/` - repeated string failed_locations = 3; -} - -// Request message for BigtableInstanceAdmin.UpdateAppProfile. -message UpdateAppProfileRequest { - // The app profile which will (partially) replace the current value. - AppProfile app_profile = 1; - - // The subset of app profile fields which should be replaced. - // If unset, all fields will be replaced. - google.protobuf.FieldMask update_mask = 2; - - // If true, ignore safety checks when updating the app profile. - bool ignore_warnings = 3; -} - - -// Request message for BigtableInstanceAdmin.DeleteAppProfile. -message DeleteAppProfileRequest { - // The unique name of the app profile to be deleted. Values are of the form - // `projects//instances//appProfiles/`. - string name = 1; - - // If true, ignore safety checks when deleting the app profile. - bool ignore_warnings = 2; -} - -// The metadata for the Operation returned by UpdateAppProfile. -message UpdateAppProfileMetadata { - -} diff --git a/google/cloud/bigtable_v2/proto/bigtable_pb2.py b/google/cloud/bigtable_v2/proto/bigtable_pb2.py deleted file mode 100644 index f6d825d89..000000000 --- a/google/cloud/bigtable_v2/proto/bigtable_pb2.py +++ /dev/null @@ -1,1804 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: google/cloud/bigtable_v2/proto/bigtable.proto -"""Generated protocol buffer code.""" -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.api import client_pb2 as google_dot_api_dot_client__pb2 -from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.cloud.bigtable_v2.proto import ( - data_pb2 as google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2, -) -from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2 -from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/bigtable_v2/proto/bigtable.proto", - package="google.bigtable.v2", - syntax="proto3", - serialized_options=b"\n\026com.google.bigtable.v2B\rBigtableProtoP\001Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2\352\002\033Google::Cloud::Bigtable::V2\352AW\n\035bigtable.googleapis.com/Table\0226projects/{project}/instances/{instance}/tables/{table}", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n-google/cloud/bigtable_v2/proto/bigtable.proto\x12\x12google.bigtable.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a)google/cloud/bigtable_v2/proto/data.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x17google/rpc/status.proto"\xd1\x01\n\x0fReadRowsRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x05 \x01(\t\x12(\n\x04rows\x18\x02 \x01(\x0b\x32\x1a.google.bigtable.v2.RowSet\x12-\n\x06\x66ilter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x12\n\nrows_limit\x18\x04 \x01(\x03"\xf8\x02\n\x10ReadRowsResponse\x12>\n\x06\x63hunks\x18\x01 \x03(\x0b\x32..google.bigtable.v2.ReadRowsResponse.CellChunk\x12\x1c\n\x14last_scanned_row_key\x18\x02 \x01(\x0c\x1a\x85\x02\n\tCellChunk\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x31\n\x0b\x66\x61mily_name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\tqualifier\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.BytesValue\x12\x18\n\x10timestamp_micros\x18\x04 \x01(\x03\x12\x0e\n\x06labels\x18\x05 \x03(\t\x12\r\n\x05value\x18\x06 \x01(\x0c\x12\x12\n\nvalue_size\x18\x07 \x01(\x05\x12\x13\n\treset_row\x18\x08 \x01(\x08H\x00\x12\x14\n\ncommit_row\x18\t \x01(\x08H\x00\x42\x0c\n\nrow_status"i\n\x14SampleRowKeysRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t">\n\x15SampleRowKeysResponse\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x14\n\x0coffset_bytes\x18\x02 \x01(\x03"\xb1\x01\n\x10MutateRowRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12\x34\n\tmutations\x18\x03 
\x03(\x0b\x32\x1c.google.bigtable.v2.MutationB\x03\xe0\x41\x02"\x13\n\x11MutateRowResponse"\xf9\x01\n\x11MutateRowsRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x03 \x01(\t\x12\x41\n\x07\x65ntries\x18\x02 \x03(\x0b\x32+.google.bigtable.v2.MutateRowsRequest.EntryB\x03\xe0\x41\x02\x1aN\n\x05\x45ntry\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x34\n\tmutations\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.MutationB\x03\xe0\x41\x02"\x8f\x01\n\x12MutateRowsResponse\x12=\n\x07\x65ntries\x18\x01 \x03(\x0b\x32,.google.bigtable.v2.MutateRowsResponse.Entry\x1a:\n\x05\x45ntry\x12\r\n\x05index\x18\x01 \x01(\x03\x12"\n\x06status\x18\x02 \x01(\x0b\x32\x12.google.rpc.Status"\xa9\x02\n\x18\x43heckAndMutateRowRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x07 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12\x37\n\x10predicate_filter\x18\x06 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x34\n\x0etrue_mutations\x18\x04 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\x12\x35\n\x0f\x66\x61lse_mutations\x18\x05 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation"6\n\x19\x43heckAndMutateRowResponse\x12\x19\n\x11predicate_matched\x18\x01 \x01(\x08"\xc1\x01\n\x19ReadModifyWriteRowRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12;\n\x05rules\x18\x03 \x03(\x0b\x32\'.google.bigtable.v2.ReadModifyWriteRuleB\x03\xe0\x41\x02"B\n\x1aReadModifyWriteRowResponse\x12$\n\x03row\x18\x01 \x01(\x0b\x32\x17.google.bigtable.v2.Row2\xc4\x0e\n\x08\x42igtable\x12\xc6\x01\n\x08ReadRows\x12#.google.bigtable.v2.ReadRowsRequest\x1a$.google.bigtable.v2.ReadRowsResponse"m\x82\xd3\xe4\x93\x02>"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\x01*\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id0\x01\x12\xd7\x01\n\rSampleRowKeys\x12(.google.bigtable.v2.SampleRowKeysRequest\x1a).google.bigtable.v2.SampleRowKeysResponse"o\x82\xd3\xe4\x93\x02@\x12>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id0\x01\x12\xed\x01\n\tMutateRow\x12$.google.bigtable.v2.MutateRowRequest\x1a%.google.bigtable.v2.MutateRowResponse"\x92\x01\x82\xd3\xe4\x93\x02?":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\x01*\xda\x41\x1ctable_name,row_key,mutations\xda\x41+table_name,row_key,mutations,app_profile_id\x12\xde\x01\n\nMutateRows\x12%.google.bigtable.v2.MutateRowsRequest\x1a&.google.bigtable.v2.MutateRowsResponse"\x7f\x82\xd3\xe4\x93\x02@";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\x01*\xda\x41\x12table_name,entries\xda\x41!table_name,entries,app_profile_id0\x01\x12\xd9\x02\n\x11\x43heckAndMutateRow\x12,.google.bigtable.v2.CheckAndMutateRowRequest\x1a-.google.bigtable.v2.CheckAndMutateRowResponse"\xe6\x01\x82\xd3\xe4\x93\x02G"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\x01*\xda\x41\x42table_name,row_key,predicate_filter,true_mutations,false_mutations\xda\x41Qtable_name,row_key,predicate_filter,true_mutations,false_mutations,app_profile_id\x12\x89\x02\n\x12ReadModifyWriteRow\x12-.google.bigtable.v2.ReadModifyWriteRowRequest\x1a..google.bigtable.v2.ReadModifyWriteRowResponse"\x93\x01\x82\xd3\xe4\x93\x02H"C/v2/{table_name=projects/*/instances/*/tables/*}:
readModifyWriteRow:\x01*\xda\x41\x18table_name,row_key,rules\xda\x41\'table_name,row_key,rules,app_profile_id\x1a\xdb\x02\xca\x41\x17\x62igtable.googleapis.com\xd2\x41\xbd\x02https://www.googleapis.com/auth/bigtable.data,https://www.googleapis.com/auth/bigtable.data.readonly,https://www.googleapis.com/auth/cloud-bigtable.data,https://www.googleapis.com/auth/cloud-bigtable.data.readonly,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-onlyB\x93\x02\n\x16\x63om.google.bigtable.v2B\rBigtableProtoP\x01Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2\xea\x02\x1bGoogle::Cloud::Bigtable::V2\xea\x41W\n\x1d\x62igtable.googleapis.com/Table\x12\x36projects/{project}/instances/{instance}/tables/{table}b\x06proto3', - dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - google_dot_api_dot_client__pb2.DESCRIPTOR, - google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, - google_dot_api_dot_resource__pb2.DESCRIPTOR, - google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2.DESCRIPTOR, - google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR, - google_dot_rpc_dot_status__pb2.DESCRIPTOR, - ], -) - - -_READROWSREQUEST = _descriptor.Descriptor( - name="ReadRowsRequest", - full_name="google.bigtable.v2.ReadRowsRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="table_name", - full_name="google.bigtable.v2.ReadRowsRequest.table_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="app_profile_id", - full_name="google.bigtable.v2.ReadRowsRequest.app_profile_id", - index=1, - number=5, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="rows", - full_name="google.bigtable.v2.ReadRowsRequest.rows", - index=2, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="filter", - full_name="google.bigtable.v2.ReadRowsRequest.filter", - index=3, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="rows_limit", - full_name="google.bigtable.v2.ReadRowsRequest.rows_limit", - index=4, - number=4, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - 
is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=285, - serialized_end=494, -) - - -_READROWSRESPONSE_CELLCHUNK = _descriptor.Descriptor( - name="CellChunk", - full_name="google.bigtable.v2.ReadRowsResponse.CellChunk", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="row_key", - full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.row_key", - index=0, - number=1, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="family_name", - full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.family_name", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="qualifier", - full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.qualifier", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="timestamp_micros", - full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.timestamp_micros", - index=3, - number=4, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="labels", - full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.labels", - index=4, - number=5, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.value", - index=5, - number=6, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value_size", - full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.value_size", - index=6, - number=7, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - 
extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="reset_row", - full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.reset_row", - index=7, - number=8, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="commit_row", - full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.commit_row", - index=8, - number=9, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="row_status", - full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.row_status", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=612, - serialized_end=873, -) - -_READROWSRESPONSE = _descriptor.Descriptor( - name="ReadRowsResponse", - full_name="google.bigtable.v2.ReadRowsResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="chunks", - full_name="google.bigtable.v2.ReadRowsResponse.chunks", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="last_scanned_row_key", - full_name="google.bigtable.v2.ReadRowsResponse.last_scanned_row_key", - index=1, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[ - _READROWSRESPONSE_CELLCHUNK, - ], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=497, - serialized_end=873, -) - - -_SAMPLEROWKEYSREQUEST = _descriptor.Descriptor( - name="SampleRowKeysRequest", - full_name="google.bigtable.v2.SampleRowKeysRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="table_name", - full_name="google.bigtable.v2.SampleRowKeysRequest.table_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - 
_descriptor.FieldDescriptor( - name="app_profile_id", - full_name="google.bigtable.v2.SampleRowKeysRequest.app_profile_id", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=875, - serialized_end=980, -) - - -_SAMPLEROWKEYSRESPONSE = _descriptor.Descriptor( - name="SampleRowKeysResponse", - full_name="google.bigtable.v2.SampleRowKeysResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="row_key", - full_name="google.bigtable.v2.SampleRowKeysResponse.row_key", - index=0, - number=1, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="offset_bytes", - full_name="google.bigtable.v2.SampleRowKeysResponse.offset_bytes", - index=1, - number=2, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=982, - serialized_end=1044, -) - - -_MUTATEROWREQUEST = _descriptor.Descriptor( - name="MutateRowRequest", - full_name="google.bigtable.v2.MutateRowRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="table_name", - full_name="google.bigtable.v2.MutateRowRequest.table_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="app_profile_id", - full_name="google.bigtable.v2.MutateRowRequest.app_profile_id", - index=1, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="row_key", - full_name="google.bigtable.v2.MutateRowRequest.row_key", - index=2, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - 
file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="mutations", - full_name="google.bigtable.v2.MutateRowRequest.mutations", - index=3, - number=3, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1047, - serialized_end=1224, -) - - -_MUTATEROWRESPONSE = _descriptor.Descriptor( - name="MutateRowResponse", - full_name="google.bigtable.v2.MutateRowResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1226, - serialized_end=1245, -) - - -_MUTATEROWSREQUEST_ENTRY = _descriptor.Descriptor( - name="Entry", - full_name="google.bigtable.v2.MutateRowsRequest.Entry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="row_key", - full_name="google.bigtable.v2.MutateRowsRequest.Entry.row_key", - index=0, - number=1, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="mutations", - full_name="google.bigtable.v2.MutateRowsRequest.Entry.mutations", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1419, - serialized_end=1497, -) - -_MUTATEROWSREQUEST = _descriptor.Descriptor( - name="MutateRowsRequest", - full_name="google.bigtable.v2.MutateRowsRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="table_name", - full_name="google.bigtable.v2.MutateRowsRequest.table_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="app_profile_id", - full_name="google.bigtable.v2.MutateRowsRequest.app_profile_id", - index=1, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - 
containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="entries", - full_name="google.bigtable.v2.MutateRowsRequest.entries", - index=2, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[ - _MUTATEROWSREQUEST_ENTRY, - ], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1248, - serialized_end=1497, -) - - -_MUTATEROWSRESPONSE_ENTRY = _descriptor.Descriptor( - name="Entry", - full_name="google.bigtable.v2.MutateRowsResponse.Entry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="index", - full_name="google.bigtable.v2.MutateRowsResponse.Entry.index", - index=0, - number=1, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="status", - full_name="google.bigtable.v2.MutateRowsResponse.Entry.status", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1585, - serialized_end=1643, -) - -_MUTATEROWSRESPONSE = _descriptor.Descriptor( - name="MutateRowsResponse", - full_name="google.bigtable.v2.MutateRowsResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="entries", - full_name="google.bigtable.v2.MutateRowsResponse.entries", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[ - _MUTATEROWSRESPONSE_ENTRY, - ], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1500, - serialized_end=1643, -) - - -_CHECKANDMUTATEROWREQUEST = _descriptor.Descriptor( - name="CheckAndMutateRowRequest", - full_name="google.bigtable.v2.CheckAndMutateRowRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="table_name", - full_name="google.bigtable.v2.CheckAndMutateRowRequest.table_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, 
- default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="app_profile_id", - full_name="google.bigtable.v2.CheckAndMutateRowRequest.app_profile_id", - index=1, - number=7, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="row_key", - full_name="google.bigtable.v2.CheckAndMutateRowRequest.row_key", - index=2, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="predicate_filter", - full_name="google.bigtable.v2.CheckAndMutateRowRequest.predicate_filter", - index=3, - number=6, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="true_mutations", - full_name="google.bigtable.v2.CheckAndMutateRowRequest.true_mutations", - index=4, - number=4, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="false_mutations", - full_name="google.bigtable.v2.CheckAndMutateRowRequest.false_mutations", - index=5, - number=5, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1646, - serialized_end=1943, -) - - -_CHECKANDMUTATEROWRESPONSE = _descriptor.Descriptor( - name="CheckAndMutateRowResponse", - full_name="google.bigtable.v2.CheckAndMutateRowResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="predicate_matched", - full_name="google.bigtable.v2.CheckAndMutateRowResponse.predicate_matched", - index=0, - number=1, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - 
serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1945, - serialized_end=1999, -) - - -_READMODIFYWRITEROWREQUEST = _descriptor.Descriptor( - name="ReadModifyWriteRowRequest", - full_name="google.bigtable.v2.ReadModifyWriteRowRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="table_name", - full_name="google.bigtable.v2.ReadModifyWriteRowRequest.table_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="app_profile_id", - full_name="google.bigtable.v2.ReadModifyWriteRowRequest.app_profile_id", - index=1, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="row_key", - full_name="google.bigtable.v2.ReadModifyWriteRowRequest.row_key", - index=2, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="rules", - full_name="google.bigtable.v2.ReadModifyWriteRowRequest.rules", - index=3, - number=3, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2002, - serialized_end=2195, -) - - -_READMODIFYWRITEROWRESPONSE = _descriptor.Descriptor( - name="ReadModifyWriteRowResponse", - full_name="google.bigtable.v2.ReadModifyWriteRowResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="row", - full_name="google.bigtable.v2.ReadModifyWriteRowResponse.row", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2197, - serialized_end=2263, -) - -_READROWSREQUEST.fields_by_name[ - "rows" -].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._ROWSET 
-_READROWSREQUEST.fields_by_name[ - "filter" -].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._ROWFILTER -_READROWSRESPONSE_CELLCHUNK.fields_by_name[ - "family_name" -].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE -_READROWSRESPONSE_CELLCHUNK.fields_by_name[ - "qualifier" -].message_type = google_dot_protobuf_dot_wrappers__pb2._BYTESVALUE -_READROWSRESPONSE_CELLCHUNK.containing_type = _READROWSRESPONSE -_READROWSRESPONSE_CELLCHUNK.oneofs_by_name["row_status"].fields.append( - _READROWSRESPONSE_CELLCHUNK.fields_by_name["reset_row"] -) -_READROWSRESPONSE_CELLCHUNK.fields_by_name[ - "reset_row" -].containing_oneof = _READROWSRESPONSE_CELLCHUNK.oneofs_by_name["row_status"] -_READROWSRESPONSE_CELLCHUNK.oneofs_by_name["row_status"].fields.append( - _READROWSRESPONSE_CELLCHUNK.fields_by_name["commit_row"] -) -_READROWSRESPONSE_CELLCHUNK.fields_by_name[ - "commit_row" -].containing_oneof = _READROWSRESPONSE_CELLCHUNK.oneofs_by_name["row_status"] -_READROWSRESPONSE.fields_by_name["chunks"].message_type = _READROWSRESPONSE_CELLCHUNK -_MUTATEROWREQUEST.fields_by_name[ - "mutations" -].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._MUTATION -_MUTATEROWSREQUEST_ENTRY.fields_by_name[ - "mutations" -].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._MUTATION -_MUTATEROWSREQUEST_ENTRY.containing_type = _MUTATEROWSREQUEST -_MUTATEROWSREQUEST.fields_by_name["entries"].message_type = _MUTATEROWSREQUEST_ENTRY -_MUTATEROWSRESPONSE_ENTRY.fields_by_name[ - "status" -].message_type = google_dot_rpc_dot_status__pb2._STATUS -_MUTATEROWSRESPONSE_ENTRY.containing_type = _MUTATEROWSRESPONSE -_MUTATEROWSRESPONSE.fields_by_name["entries"].message_type = _MUTATEROWSRESPONSE_ENTRY -_CHECKANDMUTATEROWREQUEST.fields_by_name[ - "predicate_filter" -].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._ROWFILTER -_CHECKANDMUTATEROWREQUEST.fields_by_name[ - "true_mutations" -].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._MUTATION -_CHECKANDMUTATEROWREQUEST.fields_by_name[ - "false_mutations" -].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._MUTATION -_READMODIFYWRITEROWREQUEST.fields_by_name[ - "rules" -].message_type = ( - google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._READMODIFYWRITERULE -) -_READMODIFYWRITEROWRESPONSE.fields_by_name[ - "row" -].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._ROW -DESCRIPTOR.message_types_by_name["ReadRowsRequest"] = _READROWSREQUEST -DESCRIPTOR.message_types_by_name["ReadRowsResponse"] = _READROWSRESPONSE -DESCRIPTOR.message_types_by_name["SampleRowKeysRequest"] = _SAMPLEROWKEYSREQUEST -DESCRIPTOR.message_types_by_name["SampleRowKeysResponse"] = _SAMPLEROWKEYSRESPONSE -DESCRIPTOR.message_types_by_name["MutateRowRequest"] = _MUTATEROWREQUEST -DESCRIPTOR.message_types_by_name["MutateRowResponse"] = _MUTATEROWRESPONSE -DESCRIPTOR.message_types_by_name["MutateRowsRequest"] = _MUTATEROWSREQUEST -DESCRIPTOR.message_types_by_name["MutateRowsResponse"] = _MUTATEROWSRESPONSE -DESCRIPTOR.message_types_by_name["CheckAndMutateRowRequest"] = _CHECKANDMUTATEROWREQUEST -DESCRIPTOR.message_types_by_name[ - "CheckAndMutateRowResponse" -] = _CHECKANDMUTATEROWRESPONSE -DESCRIPTOR.message_types_by_name[ - "ReadModifyWriteRowRequest" -] = _READMODIFYWRITEROWREQUEST -DESCRIPTOR.message_types_by_name[ - "ReadModifyWriteRowResponse" -] = _READMODIFYWRITEROWRESPONSE 
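# --- Illustrative sketch (editorial, not part of this patch) ---------------
# The descriptor cross-linking and registration above is exactly the
# boilerplate the microgenerator removes. With the proto-plus types this PR
# adds under google.cloud.bigtable_v2.types, the same message is built as a
# plain class. Field names come from the proto in this file; the resource
# name below is hypothetical.
from google.cloud.bigtable_v2 import types

request = types.ReadRowsRequest(
    table_name="projects/my-project/instances/my-instance/tables/my-table",
    rows_limit=100,  # the default of zero means "return all matching rows"
)
assert request.rows_limit == 100  # plain attribute access, no descriptor plumbing
# ----------------------------------------------------------------------------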
-_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -ReadRowsRequest = _reflection.GeneratedProtocolMessageType( - "ReadRowsRequest", - (_message.Message,), - { - "DESCRIPTOR": _READROWSREQUEST, - "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", - "__doc__": """Request message for Bigtable.ReadRows. - - Attributes: - table_name: - Required. The unique name of the table from which to read. - Values are of the form - ``projects//instances//tables/
``. - app_profile_id: - This value specifies routing for replication. If not - specified, the “default” application profile will be used. - rows: - The row keys and/or ranges to read. If not specified, reads - from all rows. - filter: - The filter to apply to the contents of the specified row(s). - If unset, reads the entirety of each row. - rows_limit: - The read will terminate after committing to N rows’ worth of - results. The default (zero) is to return all results. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadRowsRequest) - }, -) -_sym_db.RegisterMessage(ReadRowsRequest) - -ReadRowsResponse = _reflection.GeneratedProtocolMessageType( - "ReadRowsResponse", - (_message.Message,), - { - "CellChunk": _reflection.GeneratedProtocolMessageType( - "CellChunk", - (_message.Message,), - { - "DESCRIPTOR": _READROWSRESPONSE_CELLCHUNK, - "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", - "__doc__": """Specifies a piece of a row’s contents returned as part of the read - response stream. - - Attributes: - row_key: - The row key for this chunk of data. If the row key is empty, - this CellChunk is a continuation of the same row as the - previous CellChunk in the response stream, even if that - CellChunk was in a previous ReadRowsResponse message. - family_name: - The column family name for this chunk of data. If this message - is not present this CellChunk is a continuation of the same - column family as the previous CellChunk. The empty string can - occur as a column family name in a response so clients must - check explicitly for the presence of this message, not just - for ``family_name.value`` being non-empty. - qualifier: - The column qualifier for this chunk of data. If this message - is not present, this CellChunk is a continuation of the same - column as the previous CellChunk. Column qualifiers may be - empty so clients must check for the presence of this message, - not just for ``qualifier.value`` being non-empty. - timestamp_micros: - The cell’s stored timestamp, which also uniquely identifies it - within its column. Values are always expressed in - microseconds, but individual tables may set a coarser - granularity to further restrict the allowed values. For - example, a table which specifies millisecond granularity will - only allow values of ``timestamp_micros`` which are multiples - of 1000. Timestamps are only set in the first CellChunk per - cell (for cells split into multiple chunks). - labels: - Labels applied to the cell by a - [RowFilter][google.bigtable.v2.RowFilter]. Labels are only set - on the first CellChunk per cell. - value: - The value stored in the cell. Cell values can be split across - multiple CellChunks. In that case only the value field will be - set in CellChunks after the first: the timestamp and labels - will only be present in the first CellChunk, even if the first - CellChunk came in a previous ReadRowsResponse. - value_size: - If this CellChunk is part of a chunked cell value and this is - not the final chunk of that cell, value_size will be set to - the total length of the cell value. The client can use this - size to pre-allocate memory to hold the full cell value. - row_status: - Signals to the client concerning previous CellChunks received. - reset_row: - Indicates that the client should drop all previous chunks for - ``row_key``, as it will be re-read from the beginning. - commit_row: - Indicates that the client can safely process all previous - chunks for ``row_key``, as its data has been fully read. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadRowsResponse.CellChunk) - }, - ), - "DESCRIPTOR": _READROWSRESPONSE, - "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", - "__doc__": """Response message for Bigtable.ReadRows. - - Attributes: - chunks: - A collection of a row’s contents as part of the read request. - last_scanned_row_key: - Optionally the server might return the row key of the last row - it has scanned. The client can use this to construct a more - efficient retry request if needed: any row keys or portions of - ranges less than this row key can be dropped from the request. - This is primarily useful for cases where the server has read a - lot of data that was filtered out since the last committed row - key, allowing the client to skip that work on a retry. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadRowsResponse) - }, -) -_sym_db.RegisterMessage(ReadRowsResponse) -_sym_db.RegisterMessage(ReadRowsResponse.CellChunk) - -SampleRowKeysRequest = _reflection.GeneratedProtocolMessageType( - "SampleRowKeysRequest", - (_message.Message,), - { - "DESCRIPTOR": _SAMPLEROWKEYSREQUEST, - "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", - "__doc__": """Request message for Bigtable.SampleRowKeys. - - Attributes: - table_name: - Required. The unique name of the table from which to sample - row keys. Values are of the form - ``projects//instances//tables/
``. - app_profile_id: - This value specifies routing for replication. If not - specified, the “default” application profile will be used. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.SampleRowKeysRequest) - }, -) -_sym_db.RegisterMessage(SampleRowKeysRequest) - -SampleRowKeysResponse = _reflection.GeneratedProtocolMessageType( - "SampleRowKeysResponse", - (_message.Message,), - { - "DESCRIPTOR": _SAMPLEROWKEYSRESPONSE, - "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", - "__doc__": """Response message for Bigtable.SampleRowKeys. - - Attributes: - row_key: - Sorted streamed sequence of sample row keys in the table. The - table might have contents before the first row key in the list - and after the last one, but a key containing the empty string - indicates “end of table” and will be the last response given, - if present. Note that row keys in this list may not have ever - been written to or read from, and users should therefore not - make any assumptions about the row key structure that are - specific to their use case. - offset_bytes: - Approximate total storage space used by all rows in the table - which precede ``row_key``. Buffering the contents of all rows - between two subsequent samples would require space roughly - equal to the difference in their ``offset_bytes`` fields. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.SampleRowKeysResponse) - }, -) -_sym_db.RegisterMessage(SampleRowKeysResponse) - -MutateRowRequest = _reflection.GeneratedProtocolMessageType( - "MutateRowRequest", - (_message.Message,), - { - "DESCRIPTOR": _MUTATEROWREQUEST, - "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", - "__doc__": """Request message for Bigtable.MutateRow. - - Attributes: - table_name: - Required. The unique name of the table to which the mutation - should be applied. Values are of the form - ``projects//instances//tables/
``. - app_profile_id: - This value specifies routing for replication. If not - specified, the “default” application profile will be used. - row_key: - Required. The key of the row to which the mutation should be - applied. - mutations: - Required. Changes to be atomically applied to the specified - row. Entries are applied in order, meaning that earlier - mutations can be masked by later ones. Must contain at least - one entry and at most 100000. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowRequest) - }, -) -_sym_db.RegisterMessage(MutateRowRequest) - -MutateRowResponse = _reflection.GeneratedProtocolMessageType( - "MutateRowResponse", - (_message.Message,), - { - "DESCRIPTOR": _MUTATEROWRESPONSE, - "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", - "__doc__": """Response message for Bigtable.MutateRow.""", - # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowResponse) - }, -) -_sym_db.RegisterMessage(MutateRowResponse) - -MutateRowsRequest = _reflection.GeneratedProtocolMessageType( - "MutateRowsRequest", - (_message.Message,), - { - "Entry": _reflection.GeneratedProtocolMessageType( - "Entry", - (_message.Message,), - { - "DESCRIPTOR": _MUTATEROWSREQUEST_ENTRY, - "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", - "__doc__": """A mutation for a given row. - - Attributes: - row_key: - The key of the row to which the ``mutations`` should be - applied. - mutations: - Required. Changes to be atomically applied to the specified - row. Mutations are applied in order, meaning that earlier - mutations can be masked by later ones. You must specify at - least one mutation. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsRequest.Entry) - }, - ), - "DESCRIPTOR": _MUTATEROWSREQUEST, - "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", - "__doc__": """Request message for BigtableService.MutateRows. - - Attributes: - table_name: - Required. The unique name of the table to which the mutations - should be applied. - app_profile_id: - This value specifies routing for replication. If not - specified, the “default” application profile will be used. - entries: - Required. The row keys and corresponding mutations to be - applied in bulk. Each entry is applied as an atomic mutation, - but the entries may be applied in arbitrary order (even - between entries for the same row). At least one entry must be - specified, and in total the entries can contain at most 100000 - mutations. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsRequest) - }, -) -_sym_db.RegisterMessage(MutateRowsRequest) -_sym_db.RegisterMessage(MutateRowsRequest.Entry) - -MutateRowsResponse = _reflection.GeneratedProtocolMessageType( - "MutateRowsResponse", - (_message.Message,), - { - "Entry": _reflection.GeneratedProtocolMessageType( - "Entry", - (_message.Message,), - { - "DESCRIPTOR": _MUTATEROWSRESPONSE_ENTRY, - "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", - "__doc__": """The result of applying a passed mutation in the original request. - - Attributes: - index: - The index into the original request’s ``entries`` list of the - Entry for which a result is being reported. - status: - The result of the request Entry identified by ``index``. - Depending on how requests are batched during execution, it is - possible for one Entry to fail due to an error with another - Entry. In the event that this occurs, the same error will be - reported for both entries. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsResponse.Entry) - }, - ), - "DESCRIPTOR": _MUTATEROWSRESPONSE, - "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", - "__doc__": """Response message for BigtableService.MutateRows. - - Attributes: - entries: - One or more results for Entries from the batch request. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsResponse) - }, -) -_sym_db.RegisterMessage(MutateRowsResponse) -_sym_db.RegisterMessage(MutateRowsResponse.Entry) - -CheckAndMutateRowRequest = _reflection.GeneratedProtocolMessageType( - "CheckAndMutateRowRequest", - (_message.Message,), - { - "DESCRIPTOR": _CHECKANDMUTATEROWREQUEST, - "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", - "__doc__": """Request message for Bigtable.CheckAndMutateRow. - - Attributes: - table_name: - Required. The unique name of the table to which the - conditional mutation should be applied. Values are of the form - ``projects//instances//tables/
``. - app_profile_id: - This value specifies routing for replication. If not - specified, the “default” application profile will be used. - row_key: - Required. The key of the row to which the conditional mutation - should be applied. - predicate_filter: - The filter to be applied to the contents of the specified row. - Depending on whether or not any results are yielded, either - ``true_mutations`` or ``false_mutations`` will be executed. If - unset, checks that the row contains any values at all. - true_mutations: - Changes to be atomically applied to the specified row if - ``predicate_filter`` yields at least one cell when applied to - ``row_key``. Entries are applied in order, meaning that - earlier mutations can be masked by later ones. Must contain at - least one entry if ``false_mutations`` is empty, and at most - 100000. - false_mutations: - Changes to be atomically applied to the specified row if - ``predicate_filter`` does not yield any cells when applied to - ``row_key``. Entries are applied in order, meaning that - earlier mutations can be masked by later ones. Must contain at - least one entry if ``true_mutations`` is empty, and at most - 100000. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.CheckAndMutateRowRequest) - }, -) -_sym_db.RegisterMessage(CheckAndMutateRowRequest) - -CheckAndMutateRowResponse = _reflection.GeneratedProtocolMessageType( - "CheckAndMutateRowResponse", - (_message.Message,), - { - "DESCRIPTOR": _CHECKANDMUTATEROWRESPONSE, - "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", - "__doc__": """Response message for Bigtable.CheckAndMutateRow. - - Attributes: - predicate_matched: - Whether or not the request’s ``predicate_filter`` yielded any - results for the specified row. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.CheckAndMutateRowResponse) - }, -) -_sym_db.RegisterMessage(CheckAndMutateRowResponse) - -ReadModifyWriteRowRequest = _reflection.GeneratedProtocolMessageType( - "ReadModifyWriteRowRequest", - (_message.Message,), - { - "DESCRIPTOR": _READMODIFYWRITEROWREQUEST, - "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", - "__doc__": """Request message for Bigtable.ReadModifyWriteRow. - - Attributes: - table_name: - Required. The unique name of the table to which the - read/modify/write rules should be applied. Values are of the - form - ``projects//instances//tables/
``. - app_profile_id: - This value specifies routing for replication. If not - specified, the “default” application profile will be used. - row_key: - Required. The key of the row to which the read/modify/write - rules should be applied. - rules: - Required. Rules specifying how the specified row’s contents - are to be transformed into writes. Entries are applied in - order, meaning that earlier rules will affect the results of - later ones. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadModifyWriteRowRequest) - }, -) -_sym_db.RegisterMessage(ReadModifyWriteRowRequest) - -ReadModifyWriteRowResponse = _reflection.GeneratedProtocolMessageType( - "ReadModifyWriteRowResponse", - (_message.Message,), - { - "DESCRIPTOR": _READMODIFYWRITEROWRESPONSE, - "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", - "__doc__": """Response message for Bigtable.ReadModifyWriteRow. - - Attributes: - row: - A Row containing the new contents of all cells modified by the - request. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadModifyWriteRowResponse) - }, -) -_sym_db.RegisterMessage(ReadModifyWriteRowResponse) - - -DESCRIPTOR._options = None -_READROWSREQUEST.fields_by_name["table_name"]._options = None -_SAMPLEROWKEYSREQUEST.fields_by_name["table_name"]._options = None -_MUTATEROWREQUEST.fields_by_name["table_name"]._options = None -_MUTATEROWREQUEST.fields_by_name["row_key"]._options = None -_MUTATEROWREQUEST.fields_by_name["mutations"]._options = None -_MUTATEROWSREQUEST_ENTRY.fields_by_name["mutations"]._options = None -_MUTATEROWSREQUEST.fields_by_name["table_name"]._options = None -_MUTATEROWSREQUEST.fields_by_name["entries"]._options = None -_CHECKANDMUTATEROWREQUEST.fields_by_name["table_name"]._options = None -_CHECKANDMUTATEROWREQUEST.fields_by_name["row_key"]._options = None -_READMODIFYWRITEROWREQUEST.fields_by_name["table_name"]._options = None -_READMODIFYWRITEROWREQUEST.fields_by_name["row_key"]._options = None -_READMODIFYWRITEROWREQUEST.fields_by_name["rules"]._options = None - -_BIGTABLE = _descriptor.ServiceDescriptor( - name="Bigtable", - full_name="google.bigtable.v2.Bigtable", - file=DESCRIPTOR, - index=0, - serialized_options=b"\312A\027bigtable.googleapis.com\322A\275\002https://www.googleapis.com/auth/bigtable.data,https://www.googleapis.com/auth/bigtable.data.readonly,https://www.googleapis.com/auth/cloud-bigtable.data,https://www.googleapis.com/auth/cloud-bigtable.data.readonly,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-only", - create_key=_descriptor._internal_create_key, - serialized_start=2266, - serialized_end=4126, - methods=[ - _descriptor.MethodDescriptor( - name="ReadRows", - full_name="google.bigtable.v2.Bigtable.ReadRows", - index=0, - containing_service=None, - input_type=_READROWSREQUEST, - output_type=_READROWSRESPONSE, - serialized_options=b'\202\323\344\223\002>"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\001*\332A\ntable_name\332A\031table_name,app_profile_id', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="SampleRowKeys", - full_name="google.bigtable.v2.Bigtable.SampleRowKeys", - index=1, - containing_service=None, - input_type=_SAMPLEROWKEYSREQUEST, - output_type=_SAMPLEROWKEYSRESPONSE, - serialized_options=b"\202\323\344\223\002@\022>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys\332A\ntable_name\332A\031table_name,app_profile_id", - create_key=_descriptor._internal_create_key, - 
), - _descriptor.MethodDescriptor( - name="MutateRow", - full_name="google.bigtable.v2.Bigtable.MutateRow", - index=2, - containing_service=None, - input_type=_MUTATEROWREQUEST, - output_type=_MUTATEROWRESPONSE, - serialized_options=b'\202\323\344\223\002?":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\001*\332A\034table_name,row_key,mutations\332A+table_name,row_key,mutations,app_profile_id', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="MutateRows", - full_name="google.bigtable.v2.Bigtable.MutateRows", - index=3, - containing_service=None, - input_type=_MUTATEROWSREQUEST, - output_type=_MUTATEROWSRESPONSE, - serialized_options=b'\202\323\344\223\002@";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\001*\332A\022table_name,entries\332A!table_name,entries,app_profile_id', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="CheckAndMutateRow", - full_name="google.bigtable.v2.Bigtable.CheckAndMutateRow", - index=4, - containing_service=None, - input_type=_CHECKANDMUTATEROWREQUEST, - output_type=_CHECKANDMUTATEROWRESPONSE, - serialized_options=b'\202\323\344\223\002G"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\001*\332ABtable_name,row_key,predicate_filter,true_mutations,false_mutations\332AQtable_name,row_key,predicate_filter,true_mutations,false_mutations,app_profile_id', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="ReadModifyWriteRow", - full_name="google.bigtable.v2.Bigtable.ReadModifyWriteRow", - index=5, - containing_service=None, - input_type=_READMODIFYWRITEROWREQUEST, - output_type=_READMODIFYWRITEROWRESPONSE, - serialized_options=b"\202\323\344\223\002H\"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\001*\332A\030table_name,row_key,rules\332A'table_name,row_key,rules,app_profile_id", - create_key=_descriptor._internal_create_key, - ), - ], -) -_sym_db.RegisterServiceDescriptor(_BIGTABLE) - -DESCRIPTOR.services_by_name["Bigtable"] = _BIGTABLE - -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py b/google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py deleted file mode 100644 index 2a094a7f9..000000000 --- a/google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py +++ /dev/null @@ -1,313 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc - -from google.cloud.bigtable_v2.proto import ( - bigtable_pb2 as google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2, -) - - -class BigtableStub(object): - """Service for reading from and writing to existing Bigtable tables.""" - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. 
- """ - self.ReadRows = channel.unary_stream( - "/google.bigtable.v2.Bigtable/ReadRows", - request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadRowsRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadRowsResponse.FromString, - ) - self.SampleRowKeys = channel.unary_stream( - "/google.bigtable.v2.Bigtable/SampleRowKeys", - request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.SampleRowKeysRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.SampleRowKeysResponse.FromString, - ) - self.MutateRow = channel.unary_unary( - "/google.bigtable.v2.Bigtable/MutateRow", - request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowResponse.FromString, - ) - self.MutateRows = channel.unary_stream( - "/google.bigtable.v2.Bigtable/MutateRows", - request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowsRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowsResponse.FromString, - ) - self.CheckAndMutateRow = channel.unary_unary( - "/google.bigtable.v2.Bigtable/CheckAndMutateRow", - request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.CheckAndMutateRowRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.CheckAndMutateRowResponse.FromString, - ) - self.ReadModifyWriteRow = channel.unary_unary( - "/google.bigtable.v2.Bigtable/ReadModifyWriteRow", - request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadModifyWriteRowRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadModifyWriteRowResponse.FromString, - ) - - -class BigtableServicer(object): - """Service for reading from and writing to existing Bigtable tables.""" - - def ReadRows(self, request, context): - """Streams back the contents of all requested rows in key order, optionally - applying the same Reader filter to each. Depending on their size, - rows and cells may be broken up across multiple responses, but - atomicity of each row will still be preserved. See the - ReadRowsResponse documentation for details. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def SampleRowKeys(self, request, context): - """Returns a sample of row keys in the table. The returned row keys will - delimit contiguous sections of the table of approximately equal size, - which can be used to break up the data for distributed tasks like - mapreduces. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def MutateRow(self, request, context): - """Mutates a row atomically. Cells already present in the row are left - unchanged unless explicitly changed by `mutation`. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def MutateRows(self, request, context): - """Mutates multiple rows in a batch. 
Each individual row is mutated - atomically as in MutateRow, but the entire batch is not executed - atomically. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def CheckAndMutateRow(self, request, context): - """Mutates a row atomically based on the output of a predicate Reader filter.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ReadModifyWriteRow(self, request, context): - """Modifies a row atomically on the server. The method reads the latest - existing timestamp and value from the specified columns and writes a new - entry based on pre-defined read/modify/write rules. The new value for the - timestamp is the greater of the existing timestamp or the current server - time. The method returns the new contents of all modified cells. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - -def add_BigtableServicer_to_server(servicer, server): - rpc_method_handlers = { - "ReadRows": grpc.unary_stream_rpc_method_handler( - servicer.ReadRows, - request_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadRowsRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadRowsResponse.SerializeToString, - ), - "SampleRowKeys": grpc.unary_stream_rpc_method_handler( - servicer.SampleRowKeys, - request_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.SampleRowKeysRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.SampleRowKeysResponse.SerializeToString, - ), - "MutateRow": grpc.unary_unary_rpc_method_handler( - servicer.MutateRow, - request_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowResponse.SerializeToString, - ), - "MutateRows": grpc.unary_stream_rpc_method_handler( - servicer.MutateRows, - request_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowsRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowsResponse.SerializeToString, - ), - "CheckAndMutateRow": grpc.unary_unary_rpc_method_handler( - servicer.CheckAndMutateRow, - request_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.CheckAndMutateRowRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.CheckAndMutateRowResponse.SerializeToString, - ), - "ReadModifyWriteRow": grpc.unary_unary_rpc_method_handler( - servicer.ReadModifyWriteRow, - request_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadModifyWriteRowRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadModifyWriteRowResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - "google.bigtable.v2.Bigtable", rpc_method_handlers - ) - server.add_generic_rpc_handlers((generic_handler,)) - - -# This class is part of an EXPERIMENTAL API. 
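# --- Illustrative sketch (editorial, not part of this patch) ---------------
# The stub and servicer above define the raw gRPC surface being deleted:
# ReadRows, SampleRowKeys, and MutateRows are server-streaming, the rest are
# unary. A minimal consumer of the pre-PR hand-written stub, following the
# CellChunk row_status semantics documented earlier in this file; the table
# name and process() handler are hypothetical, and a real call would also
# need OAuth call credentials on the channel.
import grpc

from google.cloud.bigtable_v2.proto import bigtable_pb2
from google.cloud.bigtable_v2.proto.bigtable_pb2_grpc import BigtableStub

channel = grpc.secure_channel(
    "bigtable.googleapis.com:443", grpc.ssl_channel_credentials()
)
stub = BigtableStub(channel)
request = bigtable_pb2.ReadRowsRequest(
    table_name="projects/my-project/instances/my-instance/tables/my-table",
    rows_limit=10,
)
buffered = []
for response in stub.ReadRows(request):  # unary_stream: iterate the responses
    for chunk in response.chunks:
        if chunk.reset_row:
            buffered.clear()  # drop all previous chunks for this row
            continue
        buffered.append(chunk)
        if chunk.commit_row:
            process(buffered)  # hypothetical handler; the row is now complete
            buffered = []
# (The experimental convenience class below wraps these same six RPCs as
# one-shot static methods.)
# ----------------------------------------------------------------------------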
-class Bigtable(object): - """Service for reading from and writing to existing Bigtable tables.""" - - @staticmethod - def ReadRows( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_stream( - request, - target, - "/google.bigtable.v2.Bigtable/ReadRows", - google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadRowsRequest.SerializeToString, - google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadRowsResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def SampleRowKeys( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_stream( - request, - target, - "/google.bigtable.v2.Bigtable/SampleRowKeys", - google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.SampleRowKeysRequest.SerializeToString, - google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.SampleRowKeysResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def MutateRow( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.v2.Bigtable/MutateRow", - google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowRequest.SerializeToString, - google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def MutateRows( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_stream( - request, - target, - "/google.bigtable.v2.Bigtable/MutateRows", - google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowsRequest.SerializeToString, - google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowsResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def CheckAndMutateRow( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.v2.Bigtable/CheckAndMutateRow", - google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.CheckAndMutateRowRequest.SerializeToString, - google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.CheckAndMutateRowResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def ReadModifyWriteRow( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - 
"/google.bigtable.v2.Bigtable/ReadModifyWriteRow", - google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadModifyWriteRowRequest.SerializeToString, - google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadModifyWriteRowResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) diff --git a/google/cloud/bigtable_v2/proto/bigtable_service.proto b/google/cloud/bigtable_v2/proto/bigtable_service.proto deleted file mode 100644 index b1f729517..000000000 --- a/google/cloud/bigtable_v2/proto/bigtable_service.proto +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2018 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.v1; - -import "google/api/annotations.proto"; -import "google/bigtable/v1/bigtable_data.proto"; -import "google/bigtable/v1/bigtable_service_messages.proto"; -import "google/protobuf/empty.proto"; - -option go_package = "google.golang.org/genproto/googleapis/bigtable/v1;bigtable"; -option java_generic_services = true; -option java_multiple_files = true; -option java_outer_classname = "BigtableServicesProto"; -option java_package = "com.google.bigtable.v1"; - - -// Service for reading from and writing to existing Bigtables. -service BigtableService { - // Streams back the contents of all requested rows, optionally applying - // the same Reader filter to each. Depending on their size, rows may be - // broken up across multiple responses, but atomicity of each row will still - // be preserved. - rpc ReadRows(ReadRowsRequest) returns (stream ReadRowsResponse) { - option (google.api.http) = { - post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows:read" - body: "*" - }; - } - - // Returns a sample of row keys in the table. The returned row keys will - // delimit contiguous sections of the table of approximately equal size, - // which can be used to break up the data for distributed tasks like - // mapreduces. - rpc SampleRowKeys(SampleRowKeysRequest) returns (stream SampleRowKeysResponse) { - option (google.api.http) = { - get: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows:sampleKeys" - }; - } - - // Mutates a row atomically. Cells already present in the row are left - // unchanged unless explicitly changed by 'mutation'. - rpc MutateRow(MutateRowRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:mutate" - body: "*" - }; - } - - // Mutates multiple rows in a batch. Each individual row is mutated - // atomically as in MutateRow, but the entire batch is not executed - // atomically. - rpc MutateRows(MutateRowsRequest) returns (MutateRowsResponse) { - option (google.api.http) = { - post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}:mutateRows" - body: "*" - }; - } - - // Mutates a row atomically based on the output of a predicate Reader filter. 
- rpc CheckAndMutateRow(CheckAndMutateRowRequest) returns (CheckAndMutateRowResponse) { - option (google.api.http) = { - post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:checkAndMutate" - body: "*" - }; - } - - // Modifies a row atomically, reading the latest existing timestamp/value from - // the specified columns and writing a new value at - // max(existing timestamp, current server time) based on pre-defined - // read/modify/write rules. Returns the new contents of all modified cells. - rpc ReadModifyWriteRow(ReadModifyWriteRowRequest) returns (Row) { - option (google.api.http) = { - post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:readModifyWrite" - body: "*" - }; - } -} diff --git a/google/cloud/bigtable_v2/proto/bigtable_service_messages.proto b/google/cloud/bigtable_v2/proto/bigtable_service_messages.proto deleted file mode 100644 index d734ececa..000000000 --- a/google/cloud/bigtable_v2/proto/bigtable_service_messages.proto +++ /dev/null @@ -1,218 +0,0 @@ -// Copyright 2018 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.v1; - -import "google/bigtable/v1/bigtable_data.proto"; -import "google/rpc/status.proto"; - -option go_package = "google.golang.org/genproto/googleapis/bigtable/v1;bigtable"; -option java_multiple_files = true; -option java_outer_classname = "BigtableServiceMessagesProto"; -option java_package = "com.google.bigtable.v1"; - - -// Request message for BigtableServer.ReadRows. -message ReadRowsRequest { - // The unique name of the table from which to read. - string table_name = 1; - - // If neither row_key nor row_range is set, reads from all rows. - oneof target { - // The key of a single row from which to read. - bytes row_key = 2; - - // A range of rows from which to read. - RowRange row_range = 3; - - // A set of rows from which to read. Entries need not be in order, and will - // be deduplicated before reading. - // The total serialized size of the set must not exceed 1MB. - RowSet row_set = 8; - } - - // The filter to apply to the contents of the specified row(s). If unset, - // reads the entire table. - RowFilter filter = 5; - - // By default, rows are read sequentially, producing results which are - // guaranteed to arrive in increasing row order. Setting - // "allow_row_interleaving" to true allows multiple rows to be interleaved in - // the response stream, which increases throughput but breaks this guarantee, - // and may force the client to use more memory to buffer partially-received - // rows. Cannot be set to true when specifying "num_rows_limit". - bool allow_row_interleaving = 6; - - // The read will terminate after committing to N rows' worth of results. The - // default (zero) is to return all results. - // Note that "allow_row_interleaving" cannot be set to true when this is set. - int64 num_rows_limit = 7; -} - -// Response message for BigtableService.ReadRows. 
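The ReadRowsRequest target oneof (row_key, row_range, row_set), the filter, and the row limit map directly onto the handwritten Table.read_rows() arguments. A sketch of a bounded, filtered scan, reusing the table handle from the sketch above (key range and filter are illustrative):

    from google.cloud.bigtable import row_filters
    from google.cloud.bigtable.row_set import RowSet

    scan = RowSet()  # plays the role of the row_set target
    scan.add_row_range_from_keys(start_key=b"user#", end_key=b"user$")

    rows = table.read_rows(
        row_set=scan,
        filter_=row_filters.CellsColumnLimitFilter(1),  # newest cell per column
        limit=10,  # corresponds to the row-limit field
    )
    for row in rows:
        print(row.row_key)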
-message ReadRowsResponse { - // Specifies a piece of a row's contents returned as part of the read - // response stream. - message Chunk { - oneof chunk { - // A subset of the data from a particular row. As long as no "reset_row" - // is received in between, multiple "row_contents" from the same row are - // from the same atomic view of that row, and will be received in the - // expected family/column/timestamp order. - Family row_contents = 1; - - // Indicates that the client should drop all previous chunks for - // "row_key", as it will be re-read from the beginning. - bool reset_row = 2; - - // Indicates that the client can safely process all previous chunks for - // "row_key", as its data has been fully read. - bool commit_row = 3; - } - } - - // The key of the row for which we're receiving data. - // Results will be received in increasing row key order, unless - // "allow_row_interleaving" was specified in the request. - bytes row_key = 1; - - // One or more chunks of the row specified by "row_key". - repeated Chunk chunks = 2; -} - -// Request message for BigtableService.SampleRowKeys. -message SampleRowKeysRequest { - // The unique name of the table from which to sample row keys. - string table_name = 1; -} - -// Response message for BigtableService.SampleRowKeys. -message SampleRowKeysResponse { - // Sorted streamed sequence of sample row keys in the table. The table might - // have contents before the first row key in the list and after the last one, - // but a key containing the empty string indicates "end of table" and will be - // the last response given, if present. - // Note that row keys in this list may not have ever been written to or read - // from, and users should therefore not make any assumptions about the row key - // structure that are specific to their use case. - bytes row_key = 1; - - // Approximate total storage space used by all rows in the table which precede - // "row_key". Buffering the contents of all rows between two subsequent - // samples would require space roughly equal to the difference in their - // "offset_bytes" fields. - int64 offset_bytes = 2; -} - -// Request message for BigtableService.MutateRow. -message MutateRowRequest { - // The unique name of the table to which the mutation should be applied. - string table_name = 1; - - // The key of the row to which the mutation should be applied. - bytes row_key = 2; - - // Changes to be atomically applied to the specified row. Entries are applied - // in order, meaning that earlier mutations can be masked by later ones. - // Must contain at least one entry and at most 100000. - repeated Mutation mutations = 3; -} - -// Request message for BigtableService.MutateRows. -message MutateRowsRequest { - message Entry { - // The key of the row to which the `mutations` should be applied. - bytes row_key = 1; - - // Changes to be atomically applied to the specified row. Mutations are - // applied in order, meaning that earlier mutations can be masked by - // later ones. - // At least one mutation must be specified. - repeated Mutation mutations = 2; - } - - // The unique name of the table to which the mutations should be applied. - string table_name = 1; - - // The row keys/mutations to be applied in bulk. - // Each entry is applied as an atomic mutation, but the entries may be - // applied in arbitrary order (even between entries for the same row). - // At least one entry must be specified, and in total the entries may - // contain at most 100000 mutations. 
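SampleRowKeys and the batched MutateRows surface as Table.sample_row_keys() and Table.mutate_rows(); the latter returns one google.rpc.Status per entry, in request order, matching the semantics documented here. A sketch with placeholder families and keys, reusing the same table handle:

    # Approximate split points for distributing a scan.
    for sample in table.sample_row_keys():
        print(sample.row_key, sample.offset_bytes)

    # Each row is applied atomically; the batch as a whole is not.
    batch = []
    for key in (b"r1", b"r2"):
        r = table.direct_row(key)
        r.set_cell("cf1", b"col", b"value")
        batch.append(r)
    for r, status in zip(batch, table.mutate_rows(batch)):
        if status.code != 0:  # 0 is google.rpc.Code.OK
            print("failed:", r.row_key, status.message)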
- repeated Entry entries = 2; -} - -// Response message for BigtableService.MutateRows. -message MutateRowsResponse { - // The results for each Entry from the request, presented in the order - // in which the entries were originally given. - // Depending on how requests are batched during execution, it is possible - // for one Entry to fail due to an error with another Entry. In the event - // that this occurs, the same error will be reported for both entries. - repeated google.rpc.Status statuses = 1; -} - -// Request message for BigtableService.CheckAndMutateRowRequest -message CheckAndMutateRowRequest { - // The unique name of the table to which the conditional mutation should be - // applied. - string table_name = 1; - - // The key of the row to which the conditional mutation should be applied. - bytes row_key = 2; - - // The filter to be applied to the contents of the specified row. Depending - // on whether or not any results are yielded, either "true_mutations" or - // "false_mutations" will be executed. If unset, checks that the row contains - // any values at all. - RowFilter predicate_filter = 6; - - // Changes to be atomically applied to the specified row if "predicate_filter" - // yields at least one cell when applied to "row_key". Entries are applied in - // order, meaning that earlier mutations can be masked by later ones. - // Must contain at least one entry if "false_mutations" is empty, and at most - // 100000. - repeated Mutation true_mutations = 4; - - // Changes to be atomically applied to the specified row if "predicate_filter" - // does not yield any cells when applied to "row_key". Entries are applied in - // order, meaning that earlier mutations can be masked by later ones. - // Must contain at least one entry if "true_mutations" is empty, and at most - // 100000. - repeated Mutation false_mutations = 5; -} - -// Response message for BigtableService.CheckAndMutateRowRequest. -message CheckAndMutateRowResponse { - // Whether or not the request's "predicate_filter" yielded any results for - // the specified row. - bool predicate_matched = 1; -} - -// Request message for BigtableService.ReadModifyWriteRowRequest. -message ReadModifyWriteRowRequest { - // The unique name of the table to which the read/modify/write rules should be - // applied. - string table_name = 1; - - // The key of the row to which the read/modify/write rules should be applied. - bytes row_key = 2; - - // Rules specifying how the specified row's contents are to be transformed - // into writes. Entries are applied in order, meaning that earlier rules will - // affect the results of later ones. - repeated ReadModifyWriteRule rules = 3; -} diff --git a/google/cloud/bigtable_v2/proto/bigtable_table_admin.proto b/google/cloud/bigtable_v2/proto/bigtable_table_admin.proto deleted file mode 100644 index 2d5bddf30..000000000 --- a/google/cloud/bigtable_v2/proto/bigtable_table_admin.proto +++ /dev/null @@ -1,525 +0,0 @@ -// Copyright 2018 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
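CheckAndMutateRow and ReadModifyWriteRow correspond to Table.conditional_row() and Table.append_row() in the handwritten layer. A sketch, reusing the table handle above (the predicate filter, families, and keys are illustrative):

    from google.cloud.bigtable import row_filters

    # CheckAndMutateRow: state=True/False selects true_mutations vs.
    # false_mutations; commit() returns whether the predicate matched.
    cond = table.conditional_row(
        b"user#1", filter_=row_filters.ColumnQualifierRegexFilter(b"verified")
    )
    cond.set_cell("flags", b"state", b"active", state=True)
    cond.set_cell("flags", b"state", b"pending", state=False)
    matched = cond.commit()

    # ReadModifyWriteRow: commit() returns the new contents of modified cells.
    rmw = table.append_row(b"user#1")
    rmw.append_cell_value("stats", b"events", b",login")
    rmw.increment_cell_value("stats", b"logins", 1)
    new_cells = rmw.commit()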
-// - -syntax = "proto3"; - -package google.bigtable.admin.v2; - -import "google/api/annotations.proto"; -import "google/bigtable/admin/v2/table.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/empty.proto"; -import "google/protobuf/timestamp.proto"; - -option csharp_namespace = "Google.Cloud.Bigtable.Admin.V2"; -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin"; -option java_multiple_files = true; -option java_outer_classname = "BigtableTableAdminProto"; -option java_package = "com.google.bigtable.admin.v2"; -option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; - - -// Service for creating, configuring, and deleting Cloud Bigtable tables. -// -// -// Provides access to the table schemas only, not the data stored within -// the tables. -service BigtableTableAdmin { - // Creates a new table in the specified instance. - // The table can be created with a full set of initial column families, - // specified in the request. - rpc CreateTable(CreateTableRequest) returns (Table) { - option (google.api.http) = { - post: "/v2/{parent=projects/*/instances/*}/tables" - body: "*" - }; - } - - // Creates a new table from the specified snapshot. The target table must - // not exist. The snapshot and the table must be in the same instance. - // - // Note: This is a private alpha release of Cloud Bigtable snapshots. This - // feature is not currently available to most Cloud Bigtable customers. This - // feature might be changed in backward-incompatible ways and is not - // recommended for production use. It is not subject to any SLA or deprecation - // policy. - rpc CreateTableFromSnapshot(CreateTableFromSnapshotRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot" - body: "*" - }; - } - - // Lists all tables served from a specified instance. - rpc ListTables(ListTablesRequest) returns (ListTablesResponse) { - option (google.api.http) = { - get: "/v2/{parent=projects/*/instances/*}/tables" - }; - } - - // Gets metadata information about the specified table. - rpc GetTable(GetTableRequest) returns (Table) { - option (google.api.http) = { - get: "/v2/{name=projects/*/instances/*/tables/*}" - }; - } - - // Permanently deletes a specified table and all of its data. - rpc DeleteTable(DeleteTableRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v2/{name=projects/*/instances/*/tables/*}" - }; - } - - // Performs a series of column family modifications on the specified table. - // Either all or none of the modifications will occur before this method - // returns, but data requests received prior to that point may see a table - // where only some modifications have taken effect. - rpc ModifyColumnFamilies(ModifyColumnFamiliesRequest) returns (Table) { - option (google.api.http) = { - post: "/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies" - body: "*" - }; - } - - // Permanently drop/delete a row range from a specified table. The request can - // specify whether to delete all rows in a table, or only those that match a - // particular prefix. 
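DropRowRange's two targets (row_key_prefix and delete_all_data_from_table) are exposed on the handwritten Table as drop_by_prefix() and truncate(). A sketch, reusing the table handle above (prefix and timeout are illustrative):

    # Delete only the rows under a prefix; the prefix may not be empty.
    table.drop_by_prefix(b"user#", timeout=200)

    # Delete every row in the table.
    table.truncate(timeout=200)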
- rpc DropRowRange(DropRowRangeRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - post: "/v2/{name=projects/*/instances/*/tables/*}:dropRowRange" - body: "*" - }; - } - - // Generates a consistency token for a Table, which can be used in - // CheckConsistency to check whether mutations to the table that finished - // before this call started have been replicated. The tokens will be available - // for 90 days. - rpc GenerateConsistencyToken(GenerateConsistencyTokenRequest) returns (GenerateConsistencyTokenResponse) { - option (google.api.http) = { - post: "/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken" - body: "*" - }; - } - - // Checks replication consistency based on a consistency token, that is, if - // replication has caught up based on the conditions specified in the token - // and the check request. - rpc CheckConsistency(CheckConsistencyRequest) returns (CheckConsistencyResponse) { - option (google.api.http) = { - post: "/v2/{name=projects/*/instances/*/tables/*}:checkConsistency" - body: "*" - }; - } - - // Creates a new snapshot in the specified cluster from the specified - // source table. The cluster and the table must be in the same instance. - // - // Note: This is a private alpha release of Cloud Bigtable snapshots. This - // feature is not currently available to most Cloud Bigtable customers. This - // feature might be changed in backward-incompatible ways and is not - // recommended for production use. It is not subject to any SLA or deprecation - // policy. - rpc SnapshotTable(SnapshotTableRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v2/{name=projects/*/instances/*/tables/*}:snapshot" - body: "*" - }; - } - - // Gets metadata information about the specified snapshot. - // - // Note: This is a private alpha release of Cloud Bigtable snapshots. This - // feature is not currently available to most Cloud Bigtable customers. This - // feature might be changed in backward-incompatible ways and is not - // recommended for production use. It is not subject to any SLA or deprecation - // policy. - rpc GetSnapshot(GetSnapshotRequest) returns (Snapshot) { - option (google.api.http) = { - get: "/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}" - }; - } - - // Lists all snapshots associated with the specified cluster. - // - // Note: This is a private alpha release of Cloud Bigtable snapshots. This - // feature is not currently available to most Cloud Bigtable customers. This - // feature might be changed in backward-incompatible ways and is not - // recommended for production use. It is not subject to any SLA or deprecation - // policy. - rpc ListSnapshots(ListSnapshotsRequest) returns (ListSnapshotsResponse) { - option (google.api.http) = { - get: "/v2/{parent=projects/*/instances/*/clusters/*}/snapshots" - }; - } - - // Permanently deletes the specified snapshot. - // - // Note: This is a private alpha release of Cloud Bigtable snapshots. This - // feature is not currently available to most Cloud Bigtable customers. This - // feature might be changed in backward-incompatible ways and is not - // recommended for production use. It is not subject to any SLA or deprecation - // policy. 
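GenerateConsistencyToken and CheckConsistency pair up: create a token, then poll until replication has caught up with all mutations that finished before the token was created. A sketch against the microgenerated admin client introduced by this change, using its flattened keyword arguments (names are placeholders):

    from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient

    admin = BigtableTableAdminClient()
    name = admin.table_path("my-project", "my-instance", "my-table")

    token = admin.generate_consistency_token(name=name).consistency_token
    response = admin.check_consistency(name=name, consistency_token=token)
    print(response.consistent)  # True once replication has caught up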
- rpc DeleteSnapshot(DeleteSnapshotRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}" - }; - } -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] -message CreateTableRequest { - // An initial split point for a newly created table. - message Split { - // Row key to use as an initial tablet boundary. - bytes key = 1; - } - - // The unique name of the instance in which to create the table. - // Values are of the form `projects//instances/`. - string parent = 1; - - // The name by which the new table should be referred to within the parent - // instance, e.g., `foobar` rather than `/tables/foobar`. - string table_id = 2; - - // The Table to create. - Table table = 3; - - // The optional list of row keys that will be used to initially split the - // table into several tablets (tablets are similar to HBase regions). - // Given two split keys, `s1` and `s2`, three tablets will be created, - // spanning the key ranges: `[, s1), [s1, s2), [s2, )`. - // - // Example: - // - // * Row keys := `["a", "apple", "custom", "customer_1", "customer_2",` - // `"other", "zz"]` - // * initial_split_keys := `["apple", "customer_1", "customer_2", "other"]` - // * Key assignment: - // - Tablet 1 `[, apple) => {"a"}.` - // - Tablet 2 `[apple, customer_1) => {"apple", "custom"}.` - // - Tablet 3 `[customer_1, customer_2) => {"customer_1"}.` - // - Tablet 4 `[customer_2, other) => {"customer_2"}.` - // - Tablet 5 `[other, ) => {"other", "zz"}.` - repeated Split initial_splits = 4; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot] -// -// Note: This is a private alpha release of Cloud Bigtable snapshots. This -// feature is not currently available to most Cloud Bigtable customers. This -// feature might be changed in backward-incompatible ways and is not recommended -// for production use. It is not subject to any SLA or deprecation policy. -message CreateTableFromSnapshotRequest { - // The unique name of the instance in which to create the table. - // Values are of the form `projects//instances/`. - string parent = 1; - - // The name by which the new table should be referred to within the parent - // instance, e.g., `foobar` rather than `/tables/foobar`. - string table_id = 2; - - // The unique name of the snapshot from which to restore the table. The - // snapshot and the table must be in the same instance. - // Values are of the form - // `projects//instances//clusters//snapshots/`. - string source_snapshot = 3; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] -message DropRowRangeRequest { - // The unique name of the table on which to drop a range of rows. - // Values are of the form - // `projects//instances//tables/
`. - string name = 1; - - // Delete all rows or by prefix. - oneof target { - // Delete all rows that start with this row key prefix. Prefix cannot be - // zero length. - bytes row_key_prefix = 2; - - // Delete all rows in the table. Setting this to false is a no-op. - bool delete_all_data_from_table = 3; - } -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] -message ListTablesRequest { - // The unique name of the instance for which tables should be listed. - // Values are of the form `projects//instances/`. - string parent = 1; - - // The view to be applied to the returned tables' fields. - // Defaults to `NAME_ONLY` if unspecified; no others are currently supported. - Table.View view = 2; - - // Maximum number of results per page. - // CURRENTLY UNIMPLEMENTED AND IGNORED. - int32 page_size = 4; - - // The value of `next_page_token` returned by a previous call. - string page_token = 3; -} - -// Response message for -// [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] -message ListTablesResponse { - // The tables present in the requested instance. - repeated Table tables = 1; - - // Set if not all tables could be returned in a single response. - // Pass this value to `page_token` in another request to get the next - // page of results. - string next_page_token = 2; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.GetTable][google.bigtable.admin.v2.BigtableTableAdmin.GetTable] -message GetTableRequest { - // The unique name of the requested table. - // Values are of the form - // `projects//instances//tables/
`. - string name = 1; - - // The view to be applied to the returned table's fields. - // Defaults to `SCHEMA_VIEW` if unspecified. - Table.View view = 2; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] -message DeleteTableRequest { - // The unique name of the table to be deleted. - // Values are of the form - // `projects//instances//tables/
`. - string name = 1; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies] -message ModifyColumnFamiliesRequest { - // A create, update, or delete of a particular column family. - message Modification { - // The ID of the column family to be modified. - string id = 1; - - // Column family modifications. - oneof mod { - // Create a new column family with the specified schema, or fail if - // one already exists with the given ID. - ColumnFamily create = 2; - - // Update an existing column family to the specified schema, or fail - // if no column family exists with the given ID. - ColumnFamily update = 3; - - // Drop (delete) the column family with the given ID, or fail if no such - // family exists. - bool drop = 4; - } - } - - // The unique name of the table whose families should be modified. - // Values are of the form - // `projects//instances//tables/
`. - string name = 1; - - // Modifications to be atomically applied to the specified table's families. - // Entries are applied in order, meaning that earlier modifications can be - // masked by later ones (in the case of repeated updates to the same family, - // for example). - repeated Modification modifications = 2; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] -message GenerateConsistencyTokenRequest { - // The unique name of the Table for which to create a consistency token. - // Values are of the form - // `projects//instances//tables/
`. - string name = 1; -} - -// Response message for -// [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] -message GenerateConsistencyTokenResponse { - // The generated consistency token. - string consistency_token = 1; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] -message CheckConsistencyRequest { - // The unique name of the Table for which to check replication consistency. - // Values are of the form - // `projects//instances//tables/
`. - string name = 1; - - // The token created using GenerateConsistencyToken for the Table. - string consistency_token = 2; -} - -// Response message for -// [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] -message CheckConsistencyResponse { - // True only if the token is consistent. A token is consistent if replication - // has caught up with the restrictions specified in the request. - bool consistent = 1; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable] -// -// Note: This is a private alpha release of Cloud Bigtable snapshots. This -// feature is not currently available to most Cloud Bigtable customers. This -// feature might be changed in backward-incompatible ways and is not recommended -// for production use. It is not subject to any SLA or deprecation policy. -message SnapshotTableRequest { - // The unique name of the table to have the snapshot taken. - // Values are of the form - // `projects//instances//tables/
`. - string name = 1; - - // The name of the cluster where the snapshot will be created in. - // Values are of the form - // `projects//instances//clusters/`. - string cluster = 2; - - // The ID by which the new snapshot should be referred to within the parent - // cluster, e.g., `mysnapshot` of the form: `[_a-zA-Z0-9][-_.a-zA-Z0-9]*` - // rather than - // `projects//instances//clusters//snapshots/mysnapshot`. - string snapshot_id = 3; - - // The amount of time that the new snapshot can stay active after it is - // created. Once 'ttl' expires, the snapshot will get deleted. The maximum - // amount of time a snapshot can stay active is 7 days. If 'ttl' is not - // specified, the default value of 24 hours will be used. - google.protobuf.Duration ttl = 4; - - // Description of the snapshot. - string description = 5; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] -// -// Note: This is a private alpha release of Cloud Bigtable snapshots. This -// feature is not currently available to most Cloud Bigtable customers. This -// feature might be changed in backward-incompatible ways and is not recommended -// for production use. It is not subject to any SLA or deprecation policy. -message GetSnapshotRequest { - // The unique name of the requested snapshot. - // Values are of the form - // `projects//instances//clusters//snapshots/`. - string name = 1; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] -// -// Note: This is a private alpha release of Cloud Bigtable snapshots. This -// feature is not currently available to most Cloud Bigtable customers. This -// feature might be changed in backward-incompatible ways and is not recommended -// for production use. It is not subject to any SLA or deprecation policy. -message ListSnapshotsRequest { - // The unique name of the cluster for which snapshots should be listed. - // Values are of the form - // `projects//instances//clusters/`. - // Use ` = '-'` to list snapshots for all clusters in an instance, - // e.g., `projects//instances//clusters/-`. - string parent = 1; - - // The maximum number of snapshots to return per page. - // CURRENTLY UNIMPLEMENTED AND IGNORED. - int32 page_size = 2; - - // The value of `next_page_token` returned by a previous call. - string page_token = 3; -} - -// Response message for -// [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] -// -// Note: This is a private alpha release of Cloud Bigtable snapshots. This -// feature is not currently available to most Cloud Bigtable customers. This -// feature might be changed in backward-incompatible ways and is not recommended -// for production use. It is not subject to any SLA or deprecation policy. -message ListSnapshotsResponse { - // The snapshots present in the requested cluster. - repeated Snapshot snapshots = 1; - - // Set if not all snapshots could be returned in a single response. - // Pass this value to `page_token` in another request to get the next - // page of results. - string next_page_token = 2; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot] -// -// Note: This is a private alpha release of Cloud Bigtable snapshots. This -// feature is not currently available to most Cloud Bigtable customers. 
This -// feature might be changed in backward-incompatible ways and is not recommended -// for production use. It is not subject to any SLA or deprecation policy. -message DeleteSnapshotRequest { - // The unique name of the snapshot to be deleted. - // Values are of the form - // `projects//instances//clusters//snapshots/`. - string name = 1; -} - -// The metadata for the Operation returned by SnapshotTable. -// -// Note: This is a private alpha release of Cloud Bigtable snapshots. This -// feature is not currently available to most Cloud Bigtable customers. This -// feature might be changed in backward-incompatible ways and is not recommended -// for production use. It is not subject to any SLA or deprecation policy. -message SnapshotTableMetadata { - // The request that prompted the initiation of this SnapshotTable operation. - SnapshotTableRequest original_request = 1; - - // The time at which the original request was received. - google.protobuf.Timestamp request_time = 2; - - // The time at which the operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 3; -} - -// The metadata for the Operation returned by CreateTableFromSnapshot. -// -// Note: This is a private alpha release of Cloud Bigtable snapshots. This -// feature is not currently available to most Cloud Bigtable customers. This -// feature might be changed in backward-incompatible ways and is not recommended -// for production use. It is not subject to any SLA or deprecation policy. -message CreateTableFromSnapshotMetadata { - // The request that prompted the initiation of this CreateTableFromSnapshot - // operation. - CreateTableFromSnapshotRequest original_request = 1; - - // The time at which the original request was received. - google.protobuf.Timestamp request_time = 2; - - // The time at which the operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 3; -} diff --git a/google/cloud/bigtable_v2/proto/bigtable_table_data.proto b/google/cloud/bigtable_v2/proto/bigtable_table_data.proto deleted file mode 100644 index e4efb74f5..000000000 --- a/google/cloud/bigtable_v2/proto/bigtable_table_data.proto +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2017 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.table.v1; - -import "google/longrunning/operations.proto"; -import "google/protobuf/duration.proto"; - -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/table/v1;table"; -option java_multiple_files = true; -option java_outer_classname = "BigtableTableDataProto"; -option java_package = "com.google.bigtable.admin.table.v1"; - - -// A collection of user data indexed by row, column, and timestamp. -// Each table is served using the resources of its parent cluster. 
-message Table { - enum TimestampGranularity { - MILLIS = 0; - } - - // A unique identifier of the form - // /tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]* - string name = 1; - - // If this Table is in the process of being created, the Operation used to - // track its progress. As long as this operation is present, the Table will - // not accept any Table Admin or Read/Write requests. - google.longrunning.Operation current_operation = 2; - - // The column families configured for this table, mapped by column family id. - map column_families = 3; - - // The granularity (e.g. MILLIS, MICROS) at which timestamps are stored in - // this table. Timestamps not matching the granularity will be rejected. - // Cannot be changed once the table is created. - TimestampGranularity granularity = 4; -} - -// A set of columns within a table which share a common configuration. -message ColumnFamily { - // A unique identifier of the form /columnFamilies/[-_.a-zA-Z0-9]+ - // The last segment is the same as the "name" field in - // google.bigtable.v1.Family. - string name = 1; - - // Garbage collection expression specified by the following grammar: - // GC = EXPR - // | "" ; - // EXPR = EXPR, "||", EXPR (* lowest precedence *) - // | EXPR, "&&", EXPR - // | "(", EXPR, ")" (* highest precedence *) - // | PROP ; - // PROP = "version() >", NUM32 - // | "age() >", NUM64, [ UNIT ] ; - // NUM32 = non-zero-digit { digit } ; (* # NUM32 <= 2^32 - 1 *) - // NUM64 = non-zero-digit { digit } ; (* # NUM64 <= 2^63 - 1 *) - // UNIT = "d" | "h" | "m" (* d=days, h=hours, m=minutes, else micros *) - // GC expressions can be up to 500 characters in length - // - // The different types of PROP are defined as follows: - // version() - cell index, counting from most recent and starting at 1 - // age() - age of the cell (current time minus cell timestamp) - // - // Example: "version() > 3 || (age() > 3d && version() > 1)" - // drop cells beyond the most recent three, and drop cells older than three - // days unless they're the most recent cell in the row/column - // - // Garbage collection executes opportunistically in the background, and so - // it's possible for reads to return a cell even if it matches the active GC - // expression for its family. - string gc_expression = 2; - - // Garbage collection rule specified as a protobuf. - // Supersedes `gc_expression`. - // Must serialize to at most 500 bytes. - // - // NOTE: Garbage collection executes opportunistically in the background, and - // so it's possible for reads to return a cell even if it matches the active - // GC expression for its family. - GcRule gc_rule = 3; -} - -// Rule for determining which cells to delete during garbage collection. -message GcRule { - // A GcRule which deletes cells matching all of the given rules. - message Intersection { - // Only delete cells which would be deleted by every element of `rules`. - repeated GcRule rules = 1; - } - - // A GcRule which deletes cells matching any of the given rules. - message Union { - // Delete cells which would be deleted by any element of `rules`. - repeated GcRule rules = 1; - } - - oneof rule { - // Delete all cells in a column except the most recent N. - int32 max_num_versions = 1; - - // Delete cells in a column older than the given age. - // Values must be at least one millisecond, and will be truncated to - // microsecond granularity. - google.protobuf.Duration max_age = 2; - - // Delete cells that would be deleted by every nested rule. 
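The gc_expression grammar above is replaced in v2 by the structured GcRule message, which the handwritten layer builds from helper classes. The documented example, "version() > 3 || (age() > 3d && version() > 1)", translates to a union of a max-versions rule and an intersection, roughly:

    import datetime
    from google.cloud.bigtable import column_family

    rule = column_family.GCRuleUnion([
        column_family.MaxVersionsGCRule(3),
        column_family.GCRuleIntersection([
            column_family.MaxAgeGCRule(datetime.timedelta(days=3)),
            column_family.MaxVersionsGCRule(1),
        ]),
    ])

As with the expression form, garbage collection runs opportunistically, so reads may still return cells that match the active rule.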
- Intersection intersection = 3; - - // Delete cells that would be deleted by any nested rule. - Union union = 4; - } -} diff --git a/google/cloud/bigtable_v2/proto/bigtable_table_service.proto b/google/cloud/bigtable_v2/proto/bigtable_table_service.proto deleted file mode 100644 index 6e968fee1..000000000 --- a/google/cloud/bigtable_v2/proto/bigtable_table_service.proto +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2017 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.table.v1; - -import "google/api/annotations.proto"; -import "google/bigtable/admin/table/v1/bigtable_table_data.proto"; -import "google/bigtable/admin/table/v1/bigtable_table_service_messages.proto"; -import "google/protobuf/empty.proto"; - -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/table/v1;table"; -option java_multiple_files = true; -option java_outer_classname = "BigtableTableServicesProto"; -option java_package = "com.google.bigtable.admin.table.v1"; - - -// Service for creating, configuring, and deleting Cloud Bigtable tables. -// Provides access to the table schemas only, not the data stored within the tables. -service BigtableTableService { - // Creates a new table, to be served from a specified cluster. - // The table can be created with a full set of initial column families, - // specified in the request. - rpc CreateTable(CreateTableRequest) returns (Table) { - option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*}/tables" body: "*" }; - } - - // Lists the names of all tables served from a specified cluster. - rpc ListTables(ListTablesRequest) returns (ListTablesResponse) { - option (google.api.http) = { get: "/v1/{name=projects/*/zones/*/clusters/*}/tables" }; - } - - // Gets the schema of the specified table, including its column families. - rpc GetTable(GetTableRequest) returns (Table) { - option (google.api.http) = { get: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}" }; - } - - // Permanently deletes a specified table and all of its data. - rpc DeleteTable(DeleteTableRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { delete: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}" }; - } - - // Changes the name of a specified table. - // Cannot be used to move tables between clusters, zones, or projects. - rpc RenameTable(RenameTableRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}:rename" body: "*" }; - } - - // Creates a new column family within a specified table. - rpc CreateColumnFamily(CreateColumnFamilyRequest) returns (ColumnFamily) { - option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}/columnFamilies" body: "*" }; - } - - // Changes the configuration of a specified column family. 
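The column-family lifecycle (CreateColumnFamily, UpdateColumnFamily, DeleteColumnFamily) maps onto methods of the handwritten ColumnFamily object. A sketch, reusing the table handle and the GcRule built above ("cf1" is a placeholder):

    cf = table.column_family("cf1", gc_rule=rule)
    cf.create()                                      # CreateColumnFamily

    cf.gc_rule = column_family.MaxVersionsGCRule(5)  # UpdateColumnFamily
    cf.update()

    cf.delete()                                      # DeleteColumnFamily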
- rpc UpdateColumnFamily(ColumnFamily) returns (ColumnFamily) { - option (google.api.http) = { put: "/v1/{name=projects/*/zones/*/clusters/*/tables/*/columnFamilies/*}" body: "*" }; - } - - // Permanently deletes a specified column family and all of its data. - rpc DeleteColumnFamily(DeleteColumnFamilyRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { delete: "/v1/{name=projects/*/zones/*/clusters/*/tables/*/columnFamilies/*}" }; - } - - // Delete all rows in a table corresponding to a particular prefix - rpc BulkDeleteRows(BulkDeleteRowsRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}:bulkDeleteRows" body: "*" }; - } -} diff --git a/google/cloud/bigtable_v2/proto/bigtable_table_service_messages.proto b/google/cloud/bigtable_v2/proto/bigtable_table_service_messages.proto deleted file mode 100644 index 617ede655..000000000 --- a/google/cloud/bigtable_v2/proto/bigtable_table_service_messages.proto +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2017 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.table.v1; - -import "google/bigtable/admin/table/v1/bigtable_table_data.proto"; - -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/table/v1;table"; -option java_multiple_files = true; -option java_outer_classname = "BigtableTableServiceMessagesProto"; -option java_package = "com.google.bigtable.admin.table.v1"; - - -message CreateTableRequest { - // The unique name of the cluster in which to create the new table. - string name = 1; - - // The name by which the new table should be referred to within the cluster, - // e.g. "foobar" rather than "/tables/foobar". - string table_id = 2; - - // The Table to create. The `name` field of the Table and all of its - // ColumnFamilies must be left blank, and will be populated in the response. - Table table = 3; - - // The optional list of row keys that will be used to initially split the - // table into several tablets (Tablets are similar to HBase regions). - // Given two split keys, "s1" and "s2", three tablets will be created, - // spanning the key ranges: [, s1), [s1, s2), [s2, ). - // - // Example: - // * Row keys := ["a", "apple", "custom", "customer_1", "customer_2", - // "other", "zz"] - // * initial_split_keys := ["apple", "customer_1", "customer_2", "other"] - // * Key assignment: - // - Tablet 1 [, apple) => {"a"}. - // - Tablet 2 [apple, customer_1) => {"apple", "custom"}. - // - Tablet 3 [customer_1, customer_2) => {"customer_1"}. - // - Tablet 4 [customer_2, other) => {"customer_2"}. - // - Tablet 5 [other, ) => {"other", "zz"}. - repeated string initial_split_keys = 4; -} - -message ListTablesRequest { - // The unique name of the cluster for which tables should be listed. - string name = 1; -} - -message ListTablesResponse { - // The tables present in the requested cluster. - // At present, only the names of the tables are populated. 
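The initial_split_keys walkthrough above carries over to the v2 surface: the handwritten Table.create() accepts split keys along with an initial set of column families. A sketch reproducing the documented key assignment, reusing the instance handle from the first sketch ("customers" and "cf1" are placeholders):

    table = instance.table("customers")
    table.create(
        initial_split_keys=[b"apple", b"customer_1", b"customer_2", b"other"],
        column_families={"cf1": column_family.MaxVersionsGCRule(1)},
    )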
- repeated Table tables = 1; -} - -message GetTableRequest { - // The unique name of the requested table. - string name = 1; -} - -message DeleteTableRequest { - // The unique name of the table to be deleted. - string name = 1; -} - -message RenameTableRequest { - // The current unique name of the table. - string name = 1; - - // The new name by which the table should be referred to within its containing - // cluster, e.g. "foobar" rather than "/tables/foobar". - string new_id = 2; -} - -message CreateColumnFamilyRequest { - // The unique name of the table in which to create the new column family. - string name = 1; - - // The name by which the new column family should be referred to within the - // table, e.g. "foobar" rather than "/columnFamilies/foobar". - string column_family_id = 2; - - // The column family to create. The `name` field must be left blank. - ColumnFamily column_family = 3; -} - -message DeleteColumnFamilyRequest { - // The unique name of the column family to be deleted. - string name = 1; -} - -message BulkDeleteRowsRequest { - // The unique name of the table on which to perform the bulk delete - string table_name = 1; - - oneof target { - // Delete all rows that start with this row key prefix. Prefix cannot be - // zero length. - bytes row_key_prefix = 2; - - // Delete all rows in the table. Setting this to false is a no-op. - bool delete_all_data_from_table = 3; - } -} diff --git a/google/cloud/bigtable_v2/proto/common.proto b/google/cloud/bigtable_v2/proto/common.proto deleted file mode 100644 index 0ece12780..000000000 --- a/google/cloud/bigtable_v2/proto/common.proto +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2018 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.bigtable.admin.v2; - -import "google/api/annotations.proto"; -import "google/protobuf/timestamp.proto"; - -option csharp_namespace = "Google.Cloud.Bigtable.Admin.V2"; -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin"; -option java_multiple_files = true; -option java_outer_classname = "CommonProto"; -option java_package = "com.google.bigtable.admin.v2"; -option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; - - -// Storage media types for persisting Bigtable data. -enum StorageType { - // The user did not specify a storage type. - STORAGE_TYPE_UNSPECIFIED = 0; - - // Flash (SSD) storage should be used. - SSD = 1; - - // Magnetic drive (HDD) storage should be used. - HDD = 2; -} diff --git a/google/cloud/bigtable_v2/proto/data_pb2.py b/google/cloud/bigtable_v2/proto/data_pb2.py deleted file mode 100644 index 5f62756a8..000000000 --- a/google/cloud/bigtable_v2/proto/data_pb2.py +++ /dev/null @@ -1,2672 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
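The StorageType enum survives unchanged in the v2 admin API and is re-exported through google.cloud.bigtable.enums; it is fixed per cluster at creation time. A sketch, reusing the instance handle (cluster ID, zone, and node count are placeholders):

    from google.cloud.bigtable import enums

    cluster = instance.cluster(
        "my-cluster",
        location_id="us-central1-f",
        serve_nodes=3,
        default_storage_type=enums.StorageType.SSD,
    )
    cluster.create()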
-# source: google/cloud/bigtable_v2/proto/data.proto -"""Generated protocol buffer code.""" -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/bigtable_v2/proto/data.proto", - package="google.bigtable.v2", - syntax="proto3", - serialized_options=b"\n\026com.google.bigtable.v2B\tDataProtoP\001Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2\352\002\033Google::Cloud::Bigtable::V2", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n)google/cloud/bigtable_v2/proto/data.proto\x12\x12google.bigtable.v2"@\n\x03Row\x12\x0b\n\x03key\x18\x01 \x01(\x0c\x12,\n\x08\x66\x61milies\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Family"C\n\x06\x46\x61mily\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x07\x63olumns\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Column"D\n\x06\x43olumn\x12\x11\n\tqualifier\x18\x01 \x01(\x0c\x12\'\n\x05\x63\x65lls\x18\x02 \x03(\x0b\x32\x18.google.bigtable.v2.Cell"?\n\x04\x43\x65ll\x12\x18\n\x10timestamp_micros\x18\x01 \x01(\x03\x12\r\n\x05value\x18\x02 \x01(\x0c\x12\x0e\n\x06labels\x18\x03 \x03(\t"\x8a\x01\n\x08RowRange\x12\x1a\n\x10start_key_closed\x18\x01 \x01(\x0cH\x00\x12\x18\n\x0estart_key_open\x18\x02 \x01(\x0cH\x00\x12\x16\n\x0c\x65nd_key_open\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_key_closed\x18\x04 \x01(\x0cH\x01\x42\x0b\n\tstart_keyB\t\n\x07\x65nd_key"L\n\x06RowSet\x12\x10\n\x08row_keys\x18\x01 \x03(\x0c\x12\x30\n\nrow_ranges\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.RowRange"\xc6\x01\n\x0b\x43olumnRange\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12 \n\x16start_qualifier_closed\x18\x02 \x01(\x0cH\x00\x12\x1e\n\x14start_qualifier_open\x18\x03 \x01(\x0cH\x00\x12\x1e\n\x14\x65nd_qualifier_closed\x18\x04 \x01(\x0cH\x01\x12\x1c\n\x12\x65nd_qualifier_open\x18\x05 \x01(\x0cH\x01\x42\x11\n\x0fstart_qualifierB\x0f\n\rend_qualifier"N\n\x0eTimestampRange\x12\x1e\n\x16start_timestamp_micros\x18\x01 \x01(\x03\x12\x1c\n\x14\x65nd_timestamp_micros\x18\x02 \x01(\x03"\x98\x01\n\nValueRange\x12\x1c\n\x12start_value_closed\x18\x01 \x01(\x0cH\x00\x12\x1a\n\x10start_value_open\x18\x02 \x01(\x0cH\x00\x12\x1a\n\x10\x65nd_value_closed\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_value_open\x18\x04 \x01(\x0cH\x01\x42\r\n\x0bstart_valueB\x0b\n\tend_value"\xdf\x08\n\tRowFilter\x12\x34\n\x05\x63hain\x18\x01 \x01(\x0b\x32#.google.bigtable.v2.RowFilter.ChainH\x00\x12>\n\ninterleave\x18\x02 \x01(\x0b\x32(.google.bigtable.v2.RowFilter.InterleaveH\x00\x12<\n\tcondition\x18\x03 \x01(\x0b\x32\'.google.bigtable.v2.RowFilter.ConditionH\x00\x12\x0e\n\x04sink\x18\x10 \x01(\x08H\x00\x12\x19\n\x0fpass_all_filter\x18\x11 \x01(\x08H\x00\x12\x1a\n\x10\x62lock_all_filter\x18\x12 \x01(\x08H\x00\x12\x1e\n\x14row_key_regex_filter\x18\x04 \x01(\x0cH\x00\x12\x1b\n\x11row_sample_filter\x18\x0e \x01(\x01H\x00\x12"\n\x18\x66\x61mily_name_regex_filter\x18\x05 \x01(\tH\x00\x12\'\n\x1d\x63olumn_qualifier_regex_filter\x18\x06 \x01(\x0cH\x00\x12>\n\x13\x63olumn_range_filter\x18\x07 \x01(\x0b\x32\x1f.google.bigtable.v2.ColumnRangeH\x00\x12\x44\n\x16timestamp_range_filter\x18\x08 \x01(\x0b\x32".google.bigtable.v2.TimestampRangeH\x00\x12\x1c\n\x12value_regex_filter\x18\t 
\x01(\x0cH\x00\x12<\n\x12value_range_filter\x18\x0f \x01(\x0b\x32\x1e.google.bigtable.v2.ValueRangeH\x00\x12%\n\x1b\x63\x65lls_per_row_offset_filter\x18\n \x01(\x05H\x00\x12$\n\x1a\x63\x65lls_per_row_limit_filter\x18\x0b \x01(\x05H\x00\x12\'\n\x1d\x63\x65lls_per_column_limit_filter\x18\x0c \x01(\x05H\x00\x12!\n\x17strip_value_transformer\x18\r \x01(\x08H\x00\x12!\n\x17\x61pply_label_transformer\x18\x13 \x01(\tH\x00\x1a\x37\n\x05\x43hain\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a<\n\nInterleave\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a\xad\x01\n\tCondition\x12\x37\n\x10predicate_filter\x18\x01 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x32\n\x0btrue_filter\x18\x02 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x33\n\x0c\x66\x61lse_filter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilterB\x08\n\x06\x66ilter"\xc9\x04\n\x08Mutation\x12\x38\n\x08set_cell\x18\x01 \x01(\x0b\x32$.google.bigtable.v2.Mutation.SetCellH\x00\x12K\n\x12\x64\x65lete_from_column\x18\x02 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromColumnH\x00\x12K\n\x12\x64\x65lete_from_family\x18\x03 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromFamilyH\x00\x12\x45\n\x0f\x64\x65lete_from_row\x18\x04 \x01(\x0b\x32*.google.bigtable.v2.Mutation.DeleteFromRowH\x00\x1a\x61\n\x07SetCell\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x18\n\x10timestamp_micros\x18\x03 \x01(\x03\x12\r\n\x05value\x18\x04 \x01(\x0c\x1ay\n\x10\x44\x65leteFromColumn\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x36\n\ntime_range\x18\x03 \x01(\x0b\x32".google.bigtable.v2.TimestampRange\x1a\'\n\x10\x44\x65leteFromFamily\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x1a\x0f\n\rDeleteFromRowB\n\n\x08mutation"\x80\x01\n\x13ReadModifyWriteRule\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x16\n\x0c\x61ppend_value\x18\x03 \x01(\x0cH\x00\x12\x1a\n\x10increment_amount\x18\x04 \x01(\x03H\x00\x42\x06\n\x04ruleB\xb5\x01\n\x16\x63om.google.bigtable.v2B\tDataProtoP\x01Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2\xea\x02\x1bGoogle::Cloud::Bigtable::V2b\x06proto3', -) - - -_ROW = _descriptor.Descriptor( - name="Row", - full_name="google.bigtable.v2.Row", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.bigtable.v2.Row.key", - index=0, - number=1, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="families", - full_name="google.bigtable.v2.Row.families", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - 
oneofs=[], - serialized_start=65, - serialized_end=129, -) - - -_FAMILY = _descriptor.Descriptor( - name="Family", - full_name="google.bigtable.v2.Family", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.v2.Family.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="columns", - full_name="google.bigtable.v2.Family.columns", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=131, - serialized_end=198, -) - - -_COLUMN = _descriptor.Descriptor( - name="Column", - full_name="google.bigtable.v2.Column", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="qualifier", - full_name="google.bigtable.v2.Column.qualifier", - index=0, - number=1, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cells", - full_name="google.bigtable.v2.Column.cells", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=200, - serialized_end=268, -) - - -_CELL = _descriptor.Descriptor( - name="Cell", - full_name="google.bigtable.v2.Cell", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="timestamp_micros", - full_name="google.bigtable.v2.Cell.timestamp_micros", - index=0, - number=1, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.bigtable.v2.Cell.value", - index=1, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - 
file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="labels", - full_name="google.bigtable.v2.Cell.labels", - index=2, - number=3, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=270, - serialized_end=333, -) - - -_ROWRANGE = _descriptor.Descriptor( - name="RowRange", - full_name="google.bigtable.v2.RowRange", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="start_key_closed", - full_name="google.bigtable.v2.RowRange.start_key_closed", - index=0, - number=1, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="start_key_open", - full_name="google.bigtable.v2.RowRange.start_key_open", - index=1, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="end_key_open", - full_name="google.bigtable.v2.RowRange.end_key_open", - index=2, - number=3, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="end_key_closed", - full_name="google.bigtable.v2.RowRange.end_key_closed", - index=3, - number=4, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="start_key", - full_name="google.bigtable.v2.RowRange.start_key", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - _descriptor.OneofDescriptor( - name="end_key", - full_name="google.bigtable.v2.RowRange.end_key", - index=1, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=336, - serialized_end=474, -) - - -_ROWSET = _descriptor.Descriptor( - name="RowSet", - full_name="google.bigtable.v2.RowSet", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="row_keys", - full_name="google.bigtable.v2.RowSet.row_keys", 
- index=0, - number=1, - type=12, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="row_ranges", - full_name="google.bigtable.v2.RowSet.row_ranges", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=476, - serialized_end=552, -) - - -_COLUMNRANGE = _descriptor.Descriptor( - name="ColumnRange", - full_name="google.bigtable.v2.ColumnRange", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="family_name", - full_name="google.bigtable.v2.ColumnRange.family_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="start_qualifier_closed", - full_name="google.bigtable.v2.ColumnRange.start_qualifier_closed", - index=1, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="start_qualifier_open", - full_name="google.bigtable.v2.ColumnRange.start_qualifier_open", - index=2, - number=3, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="end_qualifier_closed", - full_name="google.bigtable.v2.ColumnRange.end_qualifier_closed", - index=3, - number=4, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="end_qualifier_open", - full_name="google.bigtable.v2.ColumnRange.end_qualifier_open", - index=4, - number=5, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - 
_descriptor.OneofDescriptor( - name="start_qualifier", - full_name="google.bigtable.v2.ColumnRange.start_qualifier", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - _descriptor.OneofDescriptor( - name="end_qualifier", - full_name="google.bigtable.v2.ColumnRange.end_qualifier", - index=1, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=555, - serialized_end=753, -) - - -_TIMESTAMPRANGE = _descriptor.Descriptor( - name="TimestampRange", - full_name="google.bigtable.v2.TimestampRange", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="start_timestamp_micros", - full_name="google.bigtable.v2.TimestampRange.start_timestamp_micros", - index=0, - number=1, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="end_timestamp_micros", - full_name="google.bigtable.v2.TimestampRange.end_timestamp_micros", - index=1, - number=2, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=755, - serialized_end=833, -) - - -_VALUERANGE = _descriptor.Descriptor( - name="ValueRange", - full_name="google.bigtable.v2.ValueRange", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="start_value_closed", - full_name="google.bigtable.v2.ValueRange.start_value_closed", - index=0, - number=1, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="start_value_open", - full_name="google.bigtable.v2.ValueRange.start_value_open", - index=1, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="end_value_closed", - full_name="google.bigtable.v2.ValueRange.end_value_closed", - index=2, - number=3, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="end_value_open", - full_name="google.bigtable.v2.ValueRange.end_value_open", - index=3, - number=4, - type=12, - cpp_type=9, - label=1, - 
has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="start_value", - full_name="google.bigtable.v2.ValueRange.start_value", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - _descriptor.OneofDescriptor( - name="end_value", - full_name="google.bigtable.v2.ValueRange.end_value", - index=1, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=836, - serialized_end=988, -) - - -_ROWFILTER_CHAIN = _descriptor.Descriptor( - name="Chain", - full_name="google.bigtable.v2.RowFilter.Chain", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="filters", - full_name="google.bigtable.v2.RowFilter.Chain.filters", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1807, - serialized_end=1862, -) - -_ROWFILTER_INTERLEAVE = _descriptor.Descriptor( - name="Interleave", - full_name="google.bigtable.v2.RowFilter.Interleave", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="filters", - full_name="google.bigtable.v2.RowFilter.Interleave.filters", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1864, - serialized_end=1924, -) - -_ROWFILTER_CONDITION = _descriptor.Descriptor( - name="Condition", - full_name="google.bigtable.v2.RowFilter.Condition", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="predicate_filter", - full_name="google.bigtable.v2.RowFilter.Condition.predicate_filter", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="true_filter", - full_name="google.bigtable.v2.RowFilter.Condition.true_filter", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - 
message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="false_filter", - full_name="google.bigtable.v2.RowFilter.Condition.false_filter", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1927, - serialized_end=2100, -) - -_ROWFILTER = _descriptor.Descriptor( - name="RowFilter", - full_name="google.bigtable.v2.RowFilter", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="chain", - full_name="google.bigtable.v2.RowFilter.chain", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="interleave", - full_name="google.bigtable.v2.RowFilter.interleave", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="condition", - full_name="google.bigtable.v2.RowFilter.condition", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="sink", - full_name="google.bigtable.v2.RowFilter.sink", - index=3, - number=16, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="pass_all_filter", - full_name="google.bigtable.v2.RowFilter.pass_all_filter", - index=4, - number=17, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="block_all_filter", - full_name="google.bigtable.v2.RowFilter.block_all_filter", - index=5, - number=18, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - 
file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="row_key_regex_filter", - full_name="google.bigtable.v2.RowFilter.row_key_regex_filter", - index=6, - number=4, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="row_sample_filter", - full_name="google.bigtable.v2.RowFilter.row_sample_filter", - index=7, - number=14, - type=1, - cpp_type=5, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="family_name_regex_filter", - full_name="google.bigtable.v2.RowFilter.family_name_regex_filter", - index=8, - number=5, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="column_qualifier_regex_filter", - full_name="google.bigtable.v2.RowFilter.column_qualifier_regex_filter", - index=9, - number=6, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="column_range_filter", - full_name="google.bigtable.v2.RowFilter.column_range_filter", - index=10, - number=7, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="timestamp_range_filter", - full_name="google.bigtable.v2.RowFilter.timestamp_range_filter", - index=11, - number=8, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value_regex_filter", - full_name="google.bigtable.v2.RowFilter.value_regex_filter", - index=12, - number=9, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value_range_filter", - full_name="google.bigtable.v2.RowFilter.value_range_filter", - index=13, - number=15, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - 
serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cells_per_row_offset_filter", - full_name="google.bigtable.v2.RowFilter.cells_per_row_offset_filter", - index=14, - number=10, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cells_per_row_limit_filter", - full_name="google.bigtable.v2.RowFilter.cells_per_row_limit_filter", - index=15, - number=11, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cells_per_column_limit_filter", - full_name="google.bigtable.v2.RowFilter.cells_per_column_limit_filter", - index=16, - number=12, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="strip_value_transformer", - full_name="google.bigtable.v2.RowFilter.strip_value_transformer", - index=17, - number=13, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="apply_label_transformer", - full_name="google.bigtable.v2.RowFilter.apply_label_transformer", - index=18, - number=19, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[ - _ROWFILTER_CHAIN, - _ROWFILTER_INTERLEAVE, - _ROWFILTER_CONDITION, - ], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="filter", - full_name="google.bigtable.v2.RowFilter.filter", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=991, - serialized_end=2110, -) - - -_MUTATION_SETCELL = _descriptor.Descriptor( - name="SetCell", - full_name="google.bigtable.v2.Mutation.SetCell", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="family_name", - full_name="google.bigtable.v2.Mutation.SetCell.family_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - 
_descriptor.FieldDescriptor( - name="column_qualifier", - full_name="google.bigtable.v2.Mutation.SetCell.column_qualifier", - index=1, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="timestamp_micros", - full_name="google.bigtable.v2.Mutation.SetCell.timestamp_micros", - index=2, - number=3, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.bigtable.v2.Mutation.SetCell.value", - index=3, - number=4, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2408, - serialized_end=2505, -) - -_MUTATION_DELETEFROMCOLUMN = _descriptor.Descriptor( - name="DeleteFromColumn", - full_name="google.bigtable.v2.Mutation.DeleteFromColumn", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="family_name", - full_name="google.bigtable.v2.Mutation.DeleteFromColumn.family_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="column_qualifier", - full_name="google.bigtable.v2.Mutation.DeleteFromColumn.column_qualifier", - index=1, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="time_range", - full_name="google.bigtable.v2.Mutation.DeleteFromColumn.time_range", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2507, - serialized_end=2628, -) - -_MUTATION_DELETEFROMFAMILY = _descriptor.Descriptor( - name="DeleteFromFamily", - full_name="google.bigtable.v2.Mutation.DeleteFromFamily", - filename=None, - file=DESCRIPTOR, - containing_type=None, - 
create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="family_name", - full_name="google.bigtable.v2.Mutation.DeleteFromFamily.family_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2630, - serialized_end=2669, -) - -_MUTATION_DELETEFROMROW = _descriptor.Descriptor( - name="DeleteFromRow", - full_name="google.bigtable.v2.Mutation.DeleteFromRow", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2671, - serialized_end=2686, -) - -_MUTATION = _descriptor.Descriptor( - name="Mutation", - full_name="google.bigtable.v2.Mutation", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="set_cell", - full_name="google.bigtable.v2.Mutation.set_cell", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="delete_from_column", - full_name="google.bigtable.v2.Mutation.delete_from_column", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="delete_from_family", - full_name="google.bigtable.v2.Mutation.delete_from_family", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="delete_from_row", - full_name="google.bigtable.v2.Mutation.delete_from_row", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[ - _MUTATION_SETCELL, - _MUTATION_DELETEFROMCOLUMN, - _MUTATION_DELETEFROMFAMILY, - _MUTATION_DELETEFROMROW, - ], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="mutation", - full_name="google.bigtable.v2.Mutation.mutation", - index=0, - containing_type=None, - 
create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=2113, - serialized_end=2698, -) - - -_READMODIFYWRITERULE = _descriptor.Descriptor( - name="ReadModifyWriteRule", - full_name="google.bigtable.v2.ReadModifyWriteRule", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="family_name", - full_name="google.bigtable.v2.ReadModifyWriteRule.family_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="column_qualifier", - full_name="google.bigtable.v2.ReadModifyWriteRule.column_qualifier", - index=1, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="append_value", - full_name="google.bigtable.v2.ReadModifyWriteRule.append_value", - index=2, - number=3, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="increment_amount", - full_name="google.bigtable.v2.ReadModifyWriteRule.increment_amount", - index=3, - number=4, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="rule", - full_name="google.bigtable.v2.ReadModifyWriteRule.rule", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=2701, - serialized_end=2829, -) - -_ROW.fields_by_name["families"].message_type = _FAMILY -_FAMILY.fields_by_name["columns"].message_type = _COLUMN -_COLUMN.fields_by_name["cells"].message_type = _CELL -_ROWRANGE.oneofs_by_name["start_key"].fields.append( - _ROWRANGE.fields_by_name["start_key_closed"] -) -_ROWRANGE.fields_by_name[ - "start_key_closed" -].containing_oneof = _ROWRANGE.oneofs_by_name["start_key"] -_ROWRANGE.oneofs_by_name["start_key"].fields.append( - _ROWRANGE.fields_by_name["start_key_open"] -) -_ROWRANGE.fields_by_name["start_key_open"].containing_oneof = _ROWRANGE.oneofs_by_name[ - "start_key" -] -_ROWRANGE.oneofs_by_name["end_key"].fields.append( - _ROWRANGE.fields_by_name["end_key_open"] -) -_ROWRANGE.fields_by_name["end_key_open"].containing_oneof = _ROWRANGE.oneofs_by_name[ - "end_key" -] -_ROWRANGE.oneofs_by_name["end_key"].fields.append( - _ROWRANGE.fields_by_name["end_key_closed"] -) -_ROWRANGE.fields_by_name["end_key_closed"].containing_oneof = _ROWRANGE.oneofs_by_name[ - "end_key" -] 
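The oneof wiring above is what makes start_key_closed/start_key_open and end_key_closed/end_key_open mutually exclusive members of RowRange. A minimal sketch of the same semantics through the proto-plus types this patch adds under google/cloud/bigtable_v2/types/ (assuming, as elsewhere in the patch, that the package re-exports these names):

    from google.cloud.bigtable_v2.types import RowRange, RowSet

    # start_key_closed and start_key_open share the "start_key" oneof,
    # so assigning one of them implicitly clears the other.
    row_range = RowRange(start_key_closed=b"user#000", end_key_open=b"user#999")

    # A RowSet may mix point lookups and contiguous ranges.
    row_set = RowSet(row_keys=[b"user#042"], row_ranges=[row_range])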
-_ROWSET.fields_by_name["row_ranges"].message_type = _ROWRANGE -_COLUMNRANGE.oneofs_by_name["start_qualifier"].fields.append( - _COLUMNRANGE.fields_by_name["start_qualifier_closed"] -) -_COLUMNRANGE.fields_by_name[ - "start_qualifier_closed" -].containing_oneof = _COLUMNRANGE.oneofs_by_name["start_qualifier"] -_COLUMNRANGE.oneofs_by_name["start_qualifier"].fields.append( - _COLUMNRANGE.fields_by_name["start_qualifier_open"] -) -_COLUMNRANGE.fields_by_name[ - "start_qualifier_open" -].containing_oneof = _COLUMNRANGE.oneofs_by_name["start_qualifier"] -_COLUMNRANGE.oneofs_by_name["end_qualifier"].fields.append( - _COLUMNRANGE.fields_by_name["end_qualifier_closed"] -) -_COLUMNRANGE.fields_by_name[ - "end_qualifier_closed" -].containing_oneof = _COLUMNRANGE.oneofs_by_name["end_qualifier"] -_COLUMNRANGE.oneofs_by_name["end_qualifier"].fields.append( - _COLUMNRANGE.fields_by_name["end_qualifier_open"] -) -_COLUMNRANGE.fields_by_name[ - "end_qualifier_open" -].containing_oneof = _COLUMNRANGE.oneofs_by_name["end_qualifier"] -_VALUERANGE.oneofs_by_name["start_value"].fields.append( - _VALUERANGE.fields_by_name["start_value_closed"] -) -_VALUERANGE.fields_by_name[ - "start_value_closed" -].containing_oneof = _VALUERANGE.oneofs_by_name["start_value"] -_VALUERANGE.oneofs_by_name["start_value"].fields.append( - _VALUERANGE.fields_by_name["start_value_open"] -) -_VALUERANGE.fields_by_name[ - "start_value_open" -].containing_oneof = _VALUERANGE.oneofs_by_name["start_value"] -_VALUERANGE.oneofs_by_name["end_value"].fields.append( - _VALUERANGE.fields_by_name["end_value_closed"] -) -_VALUERANGE.fields_by_name[ - "end_value_closed" -].containing_oneof = _VALUERANGE.oneofs_by_name["end_value"] -_VALUERANGE.oneofs_by_name["end_value"].fields.append( - _VALUERANGE.fields_by_name["end_value_open"] -) -_VALUERANGE.fields_by_name[ - "end_value_open" -].containing_oneof = _VALUERANGE.oneofs_by_name["end_value"] -_ROWFILTER_CHAIN.fields_by_name["filters"].message_type = _ROWFILTER -_ROWFILTER_CHAIN.containing_type = _ROWFILTER -_ROWFILTER_INTERLEAVE.fields_by_name["filters"].message_type = _ROWFILTER -_ROWFILTER_INTERLEAVE.containing_type = _ROWFILTER -_ROWFILTER_CONDITION.fields_by_name["predicate_filter"].message_type = _ROWFILTER -_ROWFILTER_CONDITION.fields_by_name["true_filter"].message_type = _ROWFILTER -_ROWFILTER_CONDITION.fields_by_name["false_filter"].message_type = _ROWFILTER -_ROWFILTER_CONDITION.containing_type = _ROWFILTER -_ROWFILTER.fields_by_name["chain"].message_type = _ROWFILTER_CHAIN -_ROWFILTER.fields_by_name["interleave"].message_type = _ROWFILTER_INTERLEAVE -_ROWFILTER.fields_by_name["condition"].message_type = _ROWFILTER_CONDITION -_ROWFILTER.fields_by_name["column_range_filter"].message_type = _COLUMNRANGE -_ROWFILTER.fields_by_name["timestamp_range_filter"].message_type = _TIMESTAMPRANGE -_ROWFILTER.fields_by_name["value_range_filter"].message_type = _VALUERANGE -_ROWFILTER.oneofs_by_name["filter"].fields.append(_ROWFILTER.fields_by_name["chain"]) -_ROWFILTER.fields_by_name["chain"].containing_oneof = _ROWFILTER.oneofs_by_name[ - "filter" -] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["interleave"] -) -_ROWFILTER.fields_by_name["interleave"].containing_oneof = _ROWFILTER.oneofs_by_name[ - "filter" -] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["condition"] -) -_ROWFILTER.fields_by_name["condition"].containing_oneof = _ROWFILTER.oneofs_by_name[ - "filter" -] 
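The chain/interleave/condition members wired into the "filter" oneof above are what let RowFilters nest recursively: a Chain or Interleave holds further RowFilters. A hedged sketch with the replacement proto-plus types (nested classes such as RowFilter.Chain are assumed to be exposed the same way by the new google.cloud.bigtable_v2.types.data module):

    from google.cloud.bigtable_v2.types import RowFilter

    # Keep only the newest cell per column, restricted to one family.
    latest_in_family = RowFilter(
        chain=RowFilter.Chain(
            filters=[
                RowFilter(family_name_regex_filter="stats"),
                RowFilter(cells_per_column_limit_filter=1),
            ]
        )
    )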
-_ROWFILTER.oneofs_by_name["filter"].fields.append(_ROWFILTER.fields_by_name["sink"]) -_ROWFILTER.fields_by_name["sink"].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["pass_all_filter"] -) -_ROWFILTER.fields_by_name[ - "pass_all_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["block_all_filter"] -) -_ROWFILTER.fields_by_name[ - "block_all_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["row_key_regex_filter"] -) -_ROWFILTER.fields_by_name[ - "row_key_regex_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["row_sample_filter"] -) -_ROWFILTER.fields_by_name[ - "row_sample_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["family_name_regex_filter"] -) -_ROWFILTER.fields_by_name[ - "family_name_regex_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["column_qualifier_regex_filter"] -) -_ROWFILTER.fields_by_name[ - "column_qualifier_regex_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["column_range_filter"] -) -_ROWFILTER.fields_by_name[ - "column_range_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["timestamp_range_filter"] -) -_ROWFILTER.fields_by_name[ - "timestamp_range_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["value_regex_filter"] -) -_ROWFILTER.fields_by_name[ - "value_regex_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["value_range_filter"] -) -_ROWFILTER.fields_by_name[ - "value_range_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["cells_per_row_offset_filter"] -) -_ROWFILTER.fields_by_name[ - "cells_per_row_offset_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["cells_per_row_limit_filter"] -) -_ROWFILTER.fields_by_name[ - "cells_per_row_limit_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["cells_per_column_limit_filter"] -) -_ROWFILTER.fields_by_name[ - "cells_per_column_limit_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["strip_value_transformer"] -) -_ROWFILTER.fields_by_name[ - "strip_value_transformer" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["apply_label_transformer"] -) -_ROWFILTER.fields_by_name[ - "apply_label_transformer" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_MUTATION_SETCELL.containing_type = _MUTATION 
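Every leaf filter above lands in that same "filter" oneof, so a RowFilter carries at most one member at a time, however it is built. A small sketch of that behavior; RowFilter.pb() is the standard proto-plus accessor for the wrapped protobuf message, an assumption here rather than something taken from this patch:

    from google.cloud.bigtable_v2.types import RowFilter

    f = RowFilter(pass_all_filter=True)
    f.block_all_filter = True  # same oneof: pass_all_filter is cleared

    # The underlying protobuf message reports which member is active.
    assert RowFilter.pb(f).WhichOneof("filter") == "block_all_filter"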
-_MUTATION_DELETEFROMCOLUMN.fields_by_name["time_range"].message_type = _TIMESTAMPRANGE -_MUTATION_DELETEFROMCOLUMN.containing_type = _MUTATION -_MUTATION_DELETEFROMFAMILY.containing_type = _MUTATION -_MUTATION_DELETEFROMROW.containing_type = _MUTATION -_MUTATION.fields_by_name["set_cell"].message_type = _MUTATION_SETCELL -_MUTATION.fields_by_name["delete_from_column"].message_type = _MUTATION_DELETEFROMCOLUMN -_MUTATION.fields_by_name["delete_from_family"].message_type = _MUTATION_DELETEFROMFAMILY -_MUTATION.fields_by_name["delete_from_row"].message_type = _MUTATION_DELETEFROMROW -_MUTATION.oneofs_by_name["mutation"].fields.append(_MUTATION.fields_by_name["set_cell"]) -_MUTATION.fields_by_name["set_cell"].containing_oneof = _MUTATION.oneofs_by_name[ - "mutation" -] -_MUTATION.oneofs_by_name["mutation"].fields.append( - _MUTATION.fields_by_name["delete_from_column"] -) -_MUTATION.fields_by_name[ - "delete_from_column" -].containing_oneof = _MUTATION.oneofs_by_name["mutation"] -_MUTATION.oneofs_by_name["mutation"].fields.append( - _MUTATION.fields_by_name["delete_from_family"] -) -_MUTATION.fields_by_name[ - "delete_from_family" -].containing_oneof = _MUTATION.oneofs_by_name["mutation"] -_MUTATION.oneofs_by_name["mutation"].fields.append( - _MUTATION.fields_by_name["delete_from_row"] -) -_MUTATION.fields_by_name["delete_from_row"].containing_oneof = _MUTATION.oneofs_by_name[ - "mutation" -] -_READMODIFYWRITERULE.oneofs_by_name["rule"].fields.append( - _READMODIFYWRITERULE.fields_by_name["append_value"] -) -_READMODIFYWRITERULE.fields_by_name[ - "append_value" -].containing_oneof = _READMODIFYWRITERULE.oneofs_by_name["rule"] -_READMODIFYWRITERULE.oneofs_by_name["rule"].fields.append( - _READMODIFYWRITERULE.fields_by_name["increment_amount"] -) -_READMODIFYWRITERULE.fields_by_name[ - "increment_amount" -].containing_oneof = _READMODIFYWRITERULE.oneofs_by_name["rule"] -DESCRIPTOR.message_types_by_name["Row"] = _ROW -DESCRIPTOR.message_types_by_name["Family"] = _FAMILY -DESCRIPTOR.message_types_by_name["Column"] = _COLUMN -DESCRIPTOR.message_types_by_name["Cell"] = _CELL -DESCRIPTOR.message_types_by_name["RowRange"] = _ROWRANGE -DESCRIPTOR.message_types_by_name["RowSet"] = _ROWSET -DESCRIPTOR.message_types_by_name["ColumnRange"] = _COLUMNRANGE -DESCRIPTOR.message_types_by_name["TimestampRange"] = _TIMESTAMPRANGE -DESCRIPTOR.message_types_by_name["ValueRange"] = _VALUERANGE -DESCRIPTOR.message_types_by_name["RowFilter"] = _ROWFILTER -DESCRIPTOR.message_types_by_name["Mutation"] = _MUTATION -DESCRIPTOR.message_types_by_name["ReadModifyWriteRule"] = _READMODIFYWRITERULE -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -Row = _reflection.GeneratedProtocolMessageType( - "Row", - (_message.Message,), - { - "DESCRIPTOR": _ROW, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """Specifies the complete (requested) contents of a single row of a - table. Rows which exceed 256MiB in size cannot be read in full. - - Attributes: - key: - The unique key which identifies this row within its table. - This is the same key that’s used to identify the row in, for - example, a MutateRowRequest. May contain any non-empty byte - string up to 4KiB in length. - families: - May be empty, but only if the entire row is empty. The mutual - ordering of column families is not specified. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.Row) - }, -) -_sym_db.RegisterMessage(Row) - -Family = _reflection.GeneratedProtocolMessageType( - "Family", - (_message.Message,), - { - "DESCRIPTOR": _FAMILY, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """Specifies (some of) the contents of a single row/column family - intersection of a table. - - Attributes: - name: - The unique key which identifies this family within its row. - This is the same key that’s used to identify the family in, - for example, a RowFilter which sets its - “family_name_regex_filter” field. Must match - ``[-_.a-zA-Z0-9]+``, except that AggregatingRowProcessors may - produce cells in a sentinel family with an empty name. Must be - no greater than 64 characters in length. - columns: - Must not be empty. Sorted in order of increasing “qualifier”. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.Family) - }, -) -_sym_db.RegisterMessage(Family) - -Column = _reflection.GeneratedProtocolMessageType( - "Column", - (_message.Message,), - { - "DESCRIPTOR": _COLUMN, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """Specifies (some of) the contents of a single row/column intersection - of a table. - - Attributes: - qualifier: - The unique key which identifies this column within its family. - This is the same key that’s used to identify the column in, - for example, a RowFilter which sets its - ``column_qualifier_regex_filter`` field. May contain any byte - string, including the empty string, up to 16kiB in length. - cells: - Must not be empty. Sorted in order of decreasing - “timestamp_micros”. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.Column) - }, -) -_sym_db.RegisterMessage(Column) - -Cell = _reflection.GeneratedProtocolMessageType( - "Cell", - (_message.Message,), - { - "DESCRIPTOR": _CELL, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """Specifies (some of) the contents of a single row/column/timestamp of a - table. - - Attributes: - timestamp_micros: - The cell’s stored timestamp, which also uniquely identifies it - within its column. Values are always expressed in - microseconds, but individual tables may set a coarser - granularity to further restrict the allowed values. For - example, a table which specifies millisecond granularity will - only allow values of ``timestamp_micros`` which are multiples - of 1000. - value: - The value stored in the cell. May contain any byte string, - including the empty string, up to 100MiB in length. - labels: - Labels applied to the cell by a - [RowFilter][google.bigtable.v2.RowFilter]. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.Cell) - }, -) -_sym_db.RegisterMessage(Cell) - -RowRange = _reflection.GeneratedProtocolMessageType( - "RowRange", - (_message.Message,), - { - "DESCRIPTOR": _ROWRANGE, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """Specifies a contiguous range of rows. - - Attributes: - start_key: - The row key at which to start the range. If neither field is - set, interpreted as the empty string, inclusive. - start_key_closed: - Used when giving an inclusive lower bound for the range. - start_key_open: - Used when giving an exclusive lower bound for the range. - end_key: - The row key at which to end the range. If neither field is - set, interpreted as the infinite row key, exclusive. - end_key_open: - Used when giving an exclusive upper bound for the range. 
-        end_key_closed:
-            Used when giving an inclusive upper bound for the range.
-    """,
-        # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowRange)
-    },
-)
-_sym_db.RegisterMessage(RowRange)
-
-RowSet = _reflection.GeneratedProtocolMessageType(
-    "RowSet",
-    (_message.Message,),
-    {
-        "DESCRIPTOR": _ROWSET,
-        "__module__": "google.cloud.bigtable_v2.proto.data_pb2",
-        "__doc__": """Specifies a non-contiguous set of rows.
-
-    Attributes:
-        row_keys:
-            Single rows included in the set.
-        row_ranges:
-            Contiguous row ranges included in the set.
-    """,
-        # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowSet)
-    },
-)
-_sym_db.RegisterMessage(RowSet)
-
-ColumnRange = _reflection.GeneratedProtocolMessageType(
-    "ColumnRange",
-    (_message.Message,),
-    {
-        "DESCRIPTOR": _COLUMNRANGE,
-        "__module__": "google.cloud.bigtable_v2.proto.data_pb2",
-        "__doc__": """Specifies a contiguous range of columns within a single column family.
-    The range spans from <column_family>:<start_qualifier> to
-    <column_family>:<end_qualifier>, where both bounds can be either
-    inclusive or exclusive.
-
-    Attributes:
-        family_name:
-            The name of the column family within which this range falls.
-        start_qualifier:
-            The column qualifier at which to start the range (within
-            ``column_family``). If neither field is set, interpreted as
-            the empty string, inclusive.
-        start_qualifier_closed:
-            Used when giving an inclusive lower bound for the range.
-        start_qualifier_open:
-            Used when giving an exclusive lower bound for the range.
-        end_qualifier:
-            The column qualifier at which to end the range (within
-            ``column_family``). If neither field is set, interpreted as
-            the infinite string, exclusive.
-        end_qualifier_closed:
-            Used when giving an inclusive upper bound for the range.
-        end_qualifier_open:
-            Used when giving an exclusive upper bound for the range.
-    """,
-        # @@protoc_insertion_point(class_scope:google.bigtable.v2.ColumnRange)
-    },
-)
-_sym_db.RegisterMessage(ColumnRange)
-
-TimestampRange = _reflection.GeneratedProtocolMessageType(
-    "TimestampRange",
-    (_message.Message,),
-    {
-        "DESCRIPTOR": _TIMESTAMPRANGE,
-        "__module__": "google.cloud.bigtable_v2.proto.data_pb2",
-        "__doc__": """Specifies a contiguous range of microsecond timestamps.
-
-    Attributes:
-        start_timestamp_micros:
-            Inclusive lower bound. If left empty, interpreted as 0.
-        end_timestamp_micros:
-            Exclusive upper bound. If left empty, interpreted as infinity.
-    """,
-        # @@protoc_insertion_point(class_scope:google.bigtable.v2.TimestampRange)
-    },
-)
-_sym_db.RegisterMessage(TimestampRange)
-
-ValueRange = _reflection.GeneratedProtocolMessageType(
-    "ValueRange",
-    (_message.Message,),
-    {
-        "DESCRIPTOR": _VALUERANGE,
-        "__module__": "google.cloud.bigtable_v2.proto.data_pb2",
-        "__doc__": """Specifies a contiguous range of raw byte values.
-
-    Attributes:
-        start_value:
-            The value at which to start the range. If neither field is
-            set, interpreted as the empty string, inclusive.
-        start_value_closed:
-            Used when giving an inclusive lower bound for the range.
-        start_value_open:
-            Used when giving an exclusive lower bound for the range.
-        end_value:
-            The value at which to end the range. If neither field is set,
-            interpreted as the infinite string, exclusive.
-        end_value_closed:
-            Used when giving an inclusive upper bound for the range.
-        end_value_open:
-            Used when giving an exclusive upper bound for the range.
-    """,
-        # @@protoc_insertion_point(class_scope:google.bigtable.v2.ValueRange)
-    },
-)
-_sym_db.RegisterMessage(ValueRange)
-
-RowFilter = _reflection.GeneratedProtocolMessageType(
-    "RowFilter",
-    (_message.Message,),
-    {
-        "Chain": _reflection.GeneratedProtocolMessageType(
-            "Chain",
-            (_message.Message,),
-            {
-                "DESCRIPTOR": _ROWFILTER_CHAIN,
-                "__module__": "google.cloud.bigtable_v2.proto.data_pb2",
-                "__doc__": """A RowFilter which sends rows through several RowFilters in sequence.
-
-    Attributes:
-        filters:
-            The elements of "filters" are chained together to process the
-            input row: in row -> f(0) -> intermediate row -> f(1) -> … ->
-            f(N) -> out row The full chain is executed atomically.
-    """,
-                # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowFilter.Chain)
-            },
-        ),
-        "Interleave": _reflection.GeneratedProtocolMessageType(
-            "Interleave",
-            (_message.Message,),
-            {
-                "DESCRIPTOR": _ROWFILTER_INTERLEAVE,
-                "__module__": "google.cloud.bigtable_v2.proto.data_pb2",
-                "__doc__": """A RowFilter which sends each row to each of several component
-    RowFilters and interleaves the results.
-
-    Attributes:
-        filters:
-            The elements of "filters" all process a copy of the input row,
-            and the results are pooled, sorted, and combined into a single
-            output row. If multiple cells are produced with the same
-            column and timestamp, they will all appear in the output row
-            in an unspecified mutual order. Consider the following
-            example, with three filters: ::
-
-                                     input row
-                                         |
-                -----------------------------------------------------
-                |                        |                        |
-               f(0)                     f(1)                     f(2)
-                |                        |                        |
-             1: foo,bar,10,x        foo,bar,10,z            far,bar,7,a
-             2: foo,blah,11,z       far,blah,5,x            far,blah,5,x
-                |                        |                        |
-                -----------------------------------------------------
-                                         |
-             1: foo,bar,10,z   // could have switched with #2
-             2: foo,bar,10,x   // could have switched with #1
-             3: foo,blah,11,z
-             4: far,bar,7,a
-             5: far,blah,5,x   // identical to #6
-             6: far,blah,5,x   // identical to #5
-
-            All interleaved filters are executed atomically.
-    """,
-                # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowFilter.Interleave)
-            },
-        ),
-        "Condition": _reflection.GeneratedProtocolMessageType(
-            "Condition",
-            (_message.Message,),
-            {
-                "DESCRIPTOR": _ROWFILTER_CONDITION,
-                "__module__": "google.cloud.bigtable_v2.proto.data_pb2",
-                "__doc__": """A RowFilter which evaluates one of two possible RowFilters, depending
-    on whether or not a predicate RowFilter outputs any cells from the
-    input row. IMPORTANT NOTE: The predicate filter does not execute
-    atomically with the true and false filters, which may lead to
-    inconsistent or unexpected results. Additionally, Condition filters
-    have poor performance, especially when filters are set for the false
-    condition.
-
-    Attributes:
-        predicate_filter:
-            If ``predicate_filter`` outputs any cells, then
-            ``true_filter`` will be evaluated on the input row. Otherwise,
-            ``false_filter`` will be evaluated.
-        true_filter:
-            The filter to apply to the input row if ``predicate_filter``
-            returns any results. If not provided, no results will be
-            returned in the true case.
-        false_filter:
-            The filter to apply to the input row if ``predicate_filter``
-            does not return any results. If not provided, no results will
-            be returned in the false case.
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowFilter.Condition) - }, - ), - "DESCRIPTOR": _ROWFILTER, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """Takes a row as input and produces an alternate view of the row based - on specified rules. For example, a RowFilter might trim down a row to - include just the cells from columns matching a given regular - expression, or might return all the cells of a row but not their - values. More complicated filters can be composed out of these - components to express requests such as, “within every column of a - particular family, give just the two most recent cells which are older - than timestamp X.” There are two broad categories of RowFilters (true - filters and transformers), as well as two ways to compose simple - filters into more complex ones (chains and interleaves). They work as - follows: - True filters alter the input row by excluding some of its - cells wholesale from the output row. An example of a true filter is - the ``value_regex_filter``, which excludes cells whose values don’t - match the specified pattern. All regex true filters use RE2 syntax - (https://github.com/google/re2/wiki/Syntax) in raw byte mode - (RE2::Latin1), and are evaluated as full matches. An important point - to keep in mind is that ``RE2(.)`` is equivalent by default to - ``RE2([^\n])``, meaning that it does not match newlines. When - attempting to match an arbitrary byte, you should therefore use the - escape sequence ``\C``, which may need to be further escaped as - ``\\C`` in your client language. - Transformers alter the input row - by changing the values of some of its cells in the output, without - excluding them completely. Currently, the only supported - transformer is the ``strip_value_transformer``, which replaces - every cell’s value with the empty string. - Chains and - interleaves are described in more detail in the RowFilter.Chain and - RowFilter.Interleave documentation. The total serialized size of a - RowFilter message must not exceed 4096 bytes, and RowFilters may not - be nested within each other (in Chains or Interleaves) to a depth of - more than 20. - - Attributes: - filter: - Which of the possible RowFilter types to apply. If none are - set, this RowFilter returns all cells in the input row. - chain: - Applies several RowFilters to the data in sequence, - progressively narrowing the results. - interleave: - Applies several RowFilters to the data in parallel and - combines the results. - condition: - Applies one of two possible RowFilters to the data based on - the output of a predicate RowFilter. - sink: - ADVANCED USE ONLY. Hook for introspection into the RowFilter. - Outputs all cells directly to the output of the read rather - than to any parent filter. 
Consider the following example: ::

        Chain(
          FamilyRegex("A"),
          Interleave(
            All(),
            Chain(Label("foo"), Sink())
          ),
          QualifierRegex("B")
        )

        input row:  A,A,1,w / A,B,2,x / B,B,4,z

        [original ASCII dataflow diagram: the cells surviving
        FamilyRegex("A") fan out to All() and to Chain(Label("foo"),
        Sink()); every cell reaching Sink() is emitted directly, while
        the All() branch continues through QualifierRegex("B")]

        final result:  A,A,1,w,labels:[foo]
                       A,B,2,x,labels:[foo]  // could be switched
                       A,B,2,x               // could be switched

   Despite being - excluded by the qualifier filter, a copy of every cell that - reaches the sink is present in the final result. As with an - [Interleave][google.bigtable.v2.RowFilter.Interleave], - duplicate cells are possible, and appear in an unspecified - mutual order. In this case we have a duplicate with column - “A:B” and timestamp 2, because one copy passed through the all - filter while the other was passed through the label and sink. - Note that one copy has label “foo”, while the other does not. - Cannot be used within the ``predicate_filter``, - ``true_filter``, or ``false_filter`` of a - [Condition][google.bigtable.v2.RowFilter.Condition]. - pass_all_filter: - Matches all cells, regardless of input. Functionally - equivalent to leaving ``filter`` unset, but included for - completeness. - block_all_filter: - Does not match any cells, regardless of input. Useful for - temporarily disabling just part of a filter. - row_key_regex_filter: - Matches only cells from rows whose keys satisfy the given RE2 - regex. In other words, passes through the entire row when the - key matches, and otherwise produces an empty row. Note that, - since row keys can contain arbitrary bytes, the ``\C`` escape - sequence must be used if a true wildcard is desired. The ``.`` - character will not match the new line character ``\n``, which - may be present in a binary key. - row_sample_filter: - Matches all cells from a row with probability p, and matches - no cells from the row with probability 1-p. - family_name_regex_filter: - Matches only cells from columns whose families satisfy the - given RE2 regex. For technical reasons, the regex must not - contain the ``:`` character, even if it is not being used as a - literal. Note that, since column families cannot contain the - new line character ``\n``, it is sufficient to use ``.`` as a - full wildcard when matching column family names. - column_qualifier_regex_filter: - Matches only cells from columns whose qualifiers satisfy the - given RE2 regex. Note that, since column qualifiers can - contain arbitrary bytes, the ``\C`` escape sequence must be - used if a true wildcard is desired. The ``.`` character will - not match the new line character ``\n``, which may be present - in a binary qualifier. - column_range_filter: - Matches only cells from columns within the given range. - timestamp_range_filter: - Matches only cells with timestamps within the given range. - value_regex_filter: - Matches only cells with values that satisfy the given regular - expression. Note that, since cell values can contain arbitrary - bytes, the ``\C`` escape sequence must be used if a true - wildcard is desired. The ``.`` character will not match the - new line character ``\n``, which may be present in a binary - value. - value_range_filter: - Matches only cells with values that fall within the given - range.
- cells_per_row_offset_filter: - Skips the first N cells of each row, matching all subsequent - cells. If duplicate cells are present, as is possible when - using an Interleave, each copy of the cell is counted - separately. - cells_per_row_limit_filter: - Matches only the first N cells of each row. If duplicate cells - are present, as is possible when using an Interleave, each - copy of the cell is counted separately. - cells_per_column_limit_filter: - Matches only the most recent N cells within each column. For - example, if N=2, this filter would match column ``foo:bar`` at - timestamps 10 and 9, skip all earlier cells in ``foo:bar``, - and then begin matching again in column ``foo:bar2``. If - duplicate cells are present, as is possible when using an - Interleave, each copy of the cell is counted separately. - strip_value_transformer: - Replaces each cell’s value with the empty string. - apply_label_transformer: - Applies the given label to all cells in the output row. This - allows the client to determine which results were produced - from which part of the filter. Values must be at most 15 - characters in length, and match the RE2 pattern - ``[a-z0-9\\-]+``. Due to a technical limitation, it is not - currently possible to apply multiple labels to a cell. As a - result, a Chain may have no more than one sub-filter which - contains an ``apply_label_transformer``. It is okay for an - Interleave to contain multiple ``apply_label_transformers``, - as they will be applied to separate copies of the input. This - may be relaxed in the future. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowFilter) - }, -) -_sym_db.RegisterMessage(RowFilter) -_sym_db.RegisterMessage(RowFilter.Chain) -_sym_db.RegisterMessage(RowFilter.Interleave) -_sym_db.RegisterMessage(RowFilter.Condition) - -Mutation = _reflection.GeneratedProtocolMessageType( - "Mutation", - (_message.Message,), - { - "SetCell": _reflection.GeneratedProtocolMessageType( - "SetCell", - (_message.Message,), - { - "DESCRIPTOR": _MUTATION_SETCELL, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """A Mutation which sets the value of the specified cell. - - Attributes: - family_name: - The name of the family into which new data should be written. - Must match ``[-_.a-zA-Z0-9]+`` - column_qualifier: - The qualifier of the column into which new data should be - written. Can be any byte string, including the empty string. - timestamp_micros: - The timestamp of the cell into which new data should be - written. Use -1 for current Bigtable server time. Otherwise, - the client should set this value itself, noting that the - default value is a timestamp of zero if the field is left - unspecified. Values must match the granularity of the table - (e.g. micros, millis). - value: - The value to be written into the specified cell. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.SetCell) - }, - ), - "DeleteFromColumn": _reflection.GeneratedProtocolMessageType( - "DeleteFromColumn", - (_message.Message,), - { - "DESCRIPTOR": _MUTATION_DELETEFROMCOLUMN, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """A Mutation which deletes cells from the specified column, optionally - restricting the deletions to a given timestamp range. - - Attributes: - family_name: - The name of the family from which cells should be deleted. - Must match ``[-_.a-zA-Z0-9]+`` - column_qualifier: - The qualifier of the column from which cells should be - deleted.
Can be any byte string, including the empty string. - time_range: - The range of timestamps within which cells should be deleted. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.DeleteFromColumn) - }, - ), - "DeleteFromFamily": _reflection.GeneratedProtocolMessageType( - "DeleteFromFamily", - (_message.Message,), - { - "DESCRIPTOR": _MUTATION_DELETEFROMFAMILY, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """A Mutation which deletes all cells from the specified column family. - - Attributes: - family_name: - The name of the family from which cells should be deleted. - Must match ``[-_.a-zA-Z0-9]+`` - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.DeleteFromFamily) - }, - ), - "DeleteFromRow": _reflection.GeneratedProtocolMessageType( - "DeleteFromRow", - (_message.Message,), - { - "DESCRIPTOR": _MUTATION_DELETEFROMROW, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """A Mutation which deletes all cells from the containing row.""", - # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.DeleteFromRow) - }, - ), - "DESCRIPTOR": _MUTATION, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """Specifies a particular change to be made to the contents of a row. - - Attributes: - mutation: - Which of the possible Mutation types to apply. - set_cell: - Set a cell’s value. - delete_from_column: - Deletes cells from a column. - delete_from_family: - Deletes cells from a column family. - delete_from_row: - Deletes cells from the entire row. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation) - }, -) -_sym_db.RegisterMessage(Mutation) -_sym_db.RegisterMessage(Mutation.SetCell) -_sym_db.RegisterMessage(Mutation.DeleteFromColumn) -_sym_db.RegisterMessage(Mutation.DeleteFromFamily) -_sym_db.RegisterMessage(Mutation.DeleteFromRow) - -ReadModifyWriteRule = _reflection.GeneratedProtocolMessageType( - "ReadModifyWriteRule", - (_message.Message,), - { - "DESCRIPTOR": _READMODIFYWRITERULE, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """Specifies an atomic read/modify/write operation on the latest value of - the specified column. - - Attributes: - family_name: - The name of the family to which the read/modify/write should - be applied. Must match ``[-_.a-zA-Z0-9]+`` - column_qualifier: - The qualifier of the column to which the read/modify/write - should be applied. Can be any byte string, including the empty - string. - rule: - The rule used to determine the column’s new latest value from - its current latest value. - append_value: - Rule specifying that ``append_value`` be appended to the - existing value. If the targeted cell is unset, it will be - treated as containing the empty string. - increment_amount: - Rule specifying that ``increment_amount`` be added to the - existing value. If the targeted cell is unset, it will be - treated as containing a zero. Otherwise, the targeted cell - must contain an 8-byte value (interpreted as a 64-bit big- - endian signed integer), or the entire request will fail. 
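A sketch of the two rule kinds described here, again using the assumed ``google.cloud.bigtable_v2.types`` module (family and qualifier names are hypothetical); such rules feed the ReadModifyWriteRow RPC that appears later in this diff::

    from google.cloud.bigtable_v2.types import data

    rules = [
        # Append b";hit" to the current value of stats:log.
        data.ReadModifyWriteRule(
            family_name="stats",
            column_qualifier=b"log",
            append_value=b";hit",
        ),
        # Treat stats:count as a 64-bit big-endian integer and add 1.
        data.ReadModifyWriteRule(
            family_name="stats",
            column_qualifier=b"count",
            increment_amount=1,
        ),
    ]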
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadModifyWriteRule) - }, -) -_sym_db.RegisterMessage(ReadModifyWriteRule) - - -DESCRIPTOR._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/bigtable_v2/proto/data_pb2_grpc.py b/google/cloud/bigtable_v2/proto/data_pb2_grpc.py deleted file mode 100644 index 8a9393943..000000000 --- a/google/cloud/bigtable_v2/proto/data_pb2_grpc.py +++ /dev/null @@ -1,3 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc diff --git a/google/cloud/bigtable_v2/proto/instance.proto b/google/cloud/bigtable_v2/proto/instance.proto deleted file mode 100644 index bb69b1f66..000000000 --- a/google/cloud/bigtable_v2/proto/instance.proto +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright 2018 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.bigtable.admin.v2; - -import "google/api/annotations.proto"; -import "google/bigtable/admin/v2/common.proto"; - -option csharp_namespace = "Google.Cloud.Bigtable.Admin.V2"; -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin"; -option java_multiple_files = true; -option java_outer_classname = "InstanceProto"; -option java_package = "com.google.bigtable.admin.v2"; -option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; - - -// A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and -// the resources that serve them. -// All tables in an instance are served from a single -// [Cluster][google.bigtable.admin.v2.Cluster]. -message Instance { - // Possible states of an instance. - enum State { - // The state of the instance could not be determined. - STATE_NOT_KNOWN = 0; - - // The instance has been successfully created and can serve requests - // to its tables. - READY = 1; - - // The instance is currently being created, and may be destroyed - // if the creation process encounters an error. - CREATING = 2; - } - - // The type of the instance. - enum Type { - // The type of the instance is unspecified. If set when creating an - // instance, a `PRODUCTION` instance will be created. If set when updating - // an instance, the type will be left unchanged. - TYPE_UNSPECIFIED = 0; - - // An instance meant for production use. `serve_nodes` must be set - // on the cluster. - PRODUCTION = 1; - - // The instance is meant for development and testing purposes only; it has - // no performance or uptime guarantees and is not covered by SLA. - // After a development instance is created, it can be upgraded by - // updating the instance to type `PRODUCTION`. An instance created - // as a production instance cannot be changed to a development instance. - // When creating a development instance, `serve_nodes` on the cluster must - // not be set. - DEVELOPMENT = 2; - } - - // (`OutputOnly`) - // The unique name of the instance. 
Values are of the form - // `projects//instances/[a-z][a-z0-9\\-]+[a-z0-9]`. - string name = 1; - - // The descriptive name for this instance as it appears in UIs. - // Can be changed at any time, but should be kept globally unique - // to avoid confusion. - string display_name = 2; - - // (`OutputOnly`) - // The current state of the instance. - State state = 3; - - // The type of the instance. Defaults to `PRODUCTION`. - Type type = 4; - - // Labels are a flexible and lightweight mechanism for organizing cloud - // resources into groups that reflect a customer's organizational needs and - // deployment strategies. They can be used to filter resources and aggregate - // metrics. - // - // * Label keys must be between 1 and 63 characters long and must conform to - // the regular expression: `[\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}`. - // * Label values must be between 0 and 63 characters long and must conform to - // the regular expression: `[\p{Ll}\p{Lo}\p{N}_-]{0,63}`. - // * No more than 64 labels can be associated with a given resource. - // * Keys and values must both be under 128 bytes. - map labels = 5; -} - -// A resizable group of nodes in a particular cloud location, capable -// of serving all [Tables][google.bigtable.admin.v2.Table] in the parent -// [Instance][google.bigtable.admin.v2.Instance]. -message Cluster { - // Possible states of a cluster. - enum State { - // The state of the cluster could not be determined. - STATE_NOT_KNOWN = 0; - - // The cluster has been successfully created and is ready to serve requests. - READY = 1; - - // The cluster is currently being created, and may be destroyed - // if the creation process encounters an error. - // A cluster may not be able to serve requests while being created. - CREATING = 2; - - // The cluster is currently being resized, and may revert to its previous - // node count if the process encounters an error. - // A cluster is still capable of serving requests while being resized, - // but may exhibit performance as if its number of allocated nodes is - // between the starting and requested states. - RESIZING = 3; - - // The cluster has no backing nodes. The data (tables) still - // exist, but no operations can be performed on the cluster. - DISABLED = 4; - } - - // (`OutputOnly`) - // The unique name of the cluster. Values are of the form - // `projects//instances//clusters/[a-z][-a-z0-9]*`. - string name = 1; - - // (`CreationOnly`) - // The location where this cluster's nodes and storage reside. For best - // performance, clients should be located as close as possible to this - // cluster. Currently only zones are supported, so values should be of the - // form `projects//locations/`. - string location = 2; - - // (`OutputOnly`) - // The current state of the cluster. - State state = 3; - - // The number of nodes allocated to this cluster. More nodes enable higher - // throughput and more consistent performance. - int32 serve_nodes = 4; - - // (`CreationOnly`) - // The type of storage used by this cluster to serve its - // parent instance's tables, unless explicitly overridden. - StorageType default_storage_type = 5; -} - -// A configuration object describing how Cloud Bigtable should treat traffic -// from a particular end user application. -message AppProfile { - // Read/write requests may be routed to any cluster in the instance, and will - // fail over to another cluster in the event of transient errors or delays. - // Choosing this option sacrifices read-your-writes consistency to improve - // availability. 
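To make the routing trade-off concrete, a hedged sketch using the admin types generated elsewhere in this PR (the ``google.cloud.bigtable_admin_v2.types`` path and all IDs are assumptions)::

    from google.cloud.bigtable_admin_v2 import types as admin_types

    # Single-cluster routing: read-your-writes, no automatic failover.
    profile = admin_types.AppProfile(
        description="batch analytics jobs",
        single_cluster_routing=admin_types.AppProfile.SingleClusterRouting(
            cluster_id="my-cluster-id",  # hypothetical cluster
            allow_transactional_writes=True,
        ),
    )

    # Multi-cluster routing: higher availability, eventual consistency.
    profile.multi_cluster_routing_use_any = (
        admin_types.AppProfile.MultiClusterRoutingUseAny()
    )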
- message MultiClusterRoutingUseAny { - - } - - // Unconditionally routes all read/write requests to a specific cluster. - // This option preserves read-your-writes consistency, but does not improve - // availability. - message SingleClusterRouting { - // The cluster to which read/write requests should be routed. - string cluster_id = 1; - - // Whether or not `CheckAndMutateRow` and `ReadModifyWriteRow` requests are - // allowed by this app profile. It is unsafe to send these requests to - // the same table/row/column in multiple clusters. - bool allow_transactional_writes = 2; - } - - // (`OutputOnly`) - // The unique name of the app profile. Values are of the form - // `projects//instances//appProfiles/[_a-zA-Z0-9][-_.a-zA-Z0-9]*`. - string name = 1; - - // Strongly validated etag for optimistic concurrency control. Preserve the - // value returned from `GetAppProfile` when calling `UpdateAppProfile` to - // fail the request if there has been a modification in the mean time. The - // `update_mask` of the request need not include `etag` for this protection - // to apply. - // See [Wikipedia](https://en.wikipedia.org/wiki/HTTP_ETag) and - // [RFC 7232](https://tools.ietf.org/html/rfc7232#section-2.3) for more - // details. - string etag = 2; - - // Optional long form description of the use case for this AppProfile. - string description = 3; - - // The routing policy for all read/write requests which use this app profile. - // A value must be explicitly set. - oneof routing_policy { - // Use a multi-cluster routing policy that may pick any cluster. - MultiClusterRoutingUseAny multi_cluster_routing_use_any = 5; - - // Use a single-cluster routing policy. - SingleClusterRouting single_cluster_routing = 6; - } -} diff --git a/google/cloud/bigtable_v2/proto/table.proto b/google/cloud/bigtable_v2/proto/table.proto deleted file mode 100644 index 5d4374eff..000000000 --- a/google/cloud/bigtable_v2/proto/table.proto +++ /dev/null @@ -1,221 +0,0 @@ -// Copyright 2018 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.bigtable.admin.v2; - -import "google/api/annotations.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/timestamp.proto"; - -option csharp_namespace = "Google.Cloud.Bigtable.Admin.V2"; -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin"; -option java_multiple_files = true; -option java_outer_classname = "TableProto"; -option java_package = "com.google.bigtable.admin.v2"; -option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; - - -// A collection of user data indexed by row, column, and timestamp. -// Each table is served using the resources of its parent cluster. -message Table { - // The state of a table's data in a particular cluster. - message ClusterState { - // Table replication states. - enum ReplicationState { - // The replication state of the table is unknown in this cluster. 
- STATE_NOT_KNOWN = 0; - - // The cluster was recently created, and the table must finish copying - // over pre-existing data from other clusters before it can begin - // receiving live replication updates and serving Data API requests. - INITIALIZING = 1; - - // The table is temporarily unable to serve Data API requests from this - // cluster due to planned internal maintenance. - PLANNED_MAINTENANCE = 2; - - // The table is temporarily unable to serve Data API requests from this - // cluster due to unplanned or emergency maintenance. - UNPLANNED_MAINTENANCE = 3; - - // The table can serve Data API requests from this cluster. Depending on - // replication delay, reads may not immediately reflect the state of the - // table in other clusters. - READY = 4; - } - - // (`OutputOnly`) - // The state of replication for the table in this cluster. - ReplicationState replication_state = 1; - } - - // Possible timestamp granularities to use when keeping multiple versions - // of data in a table. - enum TimestampGranularity { - // The user did not specify a granularity. Should not be returned. - // When specified during table creation, MILLIS will be used. - TIMESTAMP_GRANULARITY_UNSPECIFIED = 0; - - // The table keeps data versioned at a granularity of 1ms. - MILLIS = 1; - } - - // Defines a view over a table's fields. - enum View { - // Uses the default view for each method as documented in its request. - VIEW_UNSPECIFIED = 0; - - // Only populates `name`. - NAME_ONLY = 1; - - // Only populates `name` and fields related to the table's schema. - SCHEMA_VIEW = 2; - - // Only populates `name` and fields related to the table's - // replication state. - REPLICATION_VIEW = 3; - - // Populates all fields. - FULL = 4; - } - - // (`OutputOnly`) - // The unique name of the table. Values are of the form - // `projects//instances//tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]*`. - // Views: `NAME_ONLY`, `SCHEMA_VIEW`, `REPLICATION_VIEW`, `FULL` - string name = 1; - - // (`OutputOnly`) - // Map from cluster ID to per-cluster table state. - // If it could not be determined whether or not the table has data in a - // particular cluster (for example, if its zone is unavailable), then - // there will be an entry for the cluster with UNKNOWN `replication_status`. - // Views: `REPLICATION_VIEW`, `FULL` - map cluster_states = 2; - - // (`CreationOnly`) - // The column families configured for this table, mapped by column family ID. - // Views: `SCHEMA_VIEW`, `FULL` - map column_families = 3; - - // (`CreationOnly`) - // The granularity (i.e. `MILLIS`) at which timestamps are stored in - // this table. Timestamps not matching the granularity will be rejected. - // If unspecified at creation time, the value will be set to `MILLIS`. - // Views: `SCHEMA_VIEW`, `FULL` - TimestampGranularity granularity = 4; -} - -// A set of columns within a table which share a common configuration. -message ColumnFamily { - // Garbage collection rule specified as a protobuf. - // Must serialize to at most 500 bytes. - // - // NOTE: Garbage collection executes opportunistically in the background, and - // so it's possible for reads to return a cell even if it matches the active - // GC expression for its family. - GcRule gc_rule = 1; -} - -// Rule for determining which cells to delete during garbage collection. -message GcRule { - // A GcRule which deletes cells matching all of the given rules. - message Intersection { - // Only delete cells which would be deleted by every element of `rules`. 
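These nested GC rules compose (the union variant and the oneof of concrete rules continue just below); as an illustrative sketch using the handwritten helpers in ``google.cloud.bigtable.column_family``, which serialize to this ``GcRule`` proto::

    import datetime

    from google.cloud.bigtable.column_family import (
        GCRuleIntersection,
        MaxAgeGCRule,
        MaxVersionsGCRule,
    )

    # Delete a cell only when it is both outside the two newest versions
    # and older than seven days (it must match every nested rule).
    gc_rule = GCRuleIntersection(
        rules=[
            MaxVersionsGCRule(2),
            MaxAgeGCRule(datetime.timedelta(days=7)),
        ]
    )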
- repeated GcRule rules = 1; - } - - // A GcRule which deletes cells matching any of the given rules. - message Union { - // Delete cells which would be deleted by any element of `rules`. - repeated GcRule rules = 1; - } - - // Garbage collection rules. - oneof rule { - // Delete all cells in a column except the most recent N. - int32 max_num_versions = 1; - - // Delete cells in a column older than the given age. - // Values must be at least one millisecond, and will be truncated to - // microsecond granularity. - google.protobuf.Duration max_age = 2; - - // Delete cells that would be deleted by every nested rule. - Intersection intersection = 3; - - // Delete cells that would be deleted by any nested rule. - Union union = 4; - } -} - -// A snapshot of a table at a particular time. A snapshot can be used as a -// checkpoint for data restoration or a data source for a new table. -// -// Note: This is a private alpha release of Cloud Bigtable snapshots. This -// feature is not currently available to most Cloud Bigtable customers. This -// feature might be changed in backward-incompatible ways and is not recommended -// for production use. It is not subject to any SLA or deprecation policy. -message Snapshot { - // Possible states of a snapshot. - enum State { - // The state of the snapshot could not be determined. - STATE_NOT_KNOWN = 0; - - // The snapshot has been successfully created and can serve all requests. - READY = 1; - - // The snapshot is currently being created, and may be destroyed if the - // creation process encounters an error. A snapshot may not be restored to a - // table while it is being created. - CREATING = 2; - } - - // (`OutputOnly`) - // The unique name of the snapshot. - // Values are of the form - // `projects//instances//clusters//snapshots/`. - string name = 1; - - // (`OutputOnly`) - // The source table at the time the snapshot was taken. - Table source_table = 2; - - // (`OutputOnly`) - // The size of the data in the source table at the time the snapshot was - // taken. In some cases, this value may be computed asynchronously via a - // background process and a placeholder of 0 will be used in the meantime. - int64 data_size_bytes = 3; - - // (`OutputOnly`) - // The time when the snapshot is created. - google.protobuf.Timestamp create_time = 4; - - // (`OutputOnly`) - // The time when the snapshot will be deleted. The maximum amount of time a - // snapshot can stay active is 365 days. If 'ttl' is not specified, - // the default maximum of 365 days will be used. - google.protobuf.Timestamp delete_time = 5; - - // (`OutputOnly`) - // The current state of the snapshot. - State state = 6; - - // (`OutputOnly`) - // Description of the snapshot. - string description = 7; -} diff --git a/google/cloud/bigtable_v2/py.typed b/google/cloud/bigtable_v2/py.typed new file mode 100644 index 000000000..889d34043 --- /dev/null +++ b/google/cloud/bigtable_v2/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-bigtable package uses inline types. diff --git a/google/cloud/bigtable_v2/services/__init__.py b/google/cloud/bigtable_v2/services/__init__.py new file mode 100644 index 000000000..42ffdf2bc --- /dev/null +++ b/google/cloud/bigtable_v2/services/__init__.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/google/cloud/bigtable_v2/services/bigtable/__init__.py b/google/cloud/bigtable_v2/services/bigtable/__init__.py new file mode 100644 index 000000000..622941c65 --- /dev/null +++ b/google/cloud/bigtable_v2/services/bigtable/__init__.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from .client import BigtableClient +from .async_client import BigtableAsyncClient + +__all__ = ( + "BigtableClient", + "BigtableAsyncClient", +) diff --git a/google/cloud/bigtable_v2/services/bigtable/async_client.py b/google/cloud/bigtable_v2/services/bigtable/async_client.py new file mode 100644 index 000000000..6e170e791 --- /dev/null +++ b/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -0,0 +1,865 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +import functools +import re +from typing import Dict, AsyncIterable, Awaitable, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.bigtable_v2.types import bigtable +from google.cloud.bigtable_v2.types import data + +from .transports.base import BigtableTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import BigtableGrpcAsyncIOTransport +from .client import BigtableClient + + +class BigtableAsyncClient: + """Service for reading from and writing to existing Bigtable + tables. 
+ """ + + _client: BigtableClient + + DEFAULT_ENDPOINT = BigtableClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = BigtableClient.DEFAULT_MTLS_ENDPOINT + + table_path = staticmethod(BigtableClient.table_path) + parse_table_path = staticmethod(BigtableClient.parse_table_path) + + common_billing_account_path = staticmethod( + BigtableClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + BigtableClient.parse_common_billing_account_path + ) + + common_folder_path = staticmethod(BigtableClient.common_folder_path) + parse_common_folder_path = staticmethod(BigtableClient.parse_common_folder_path) + + common_organization_path = staticmethod(BigtableClient.common_organization_path) + parse_common_organization_path = staticmethod( + BigtableClient.parse_common_organization_path + ) + + common_project_path = staticmethod(BigtableClient.common_project_path) + parse_common_project_path = staticmethod(BigtableClient.parse_common_project_path) + + common_location_path = staticmethod(BigtableClient.common_location_path) + parse_common_location_path = staticmethod(BigtableClient.parse_common_location_path) + + from_service_account_info = BigtableClient.from_service_account_info + from_service_account_file = BigtableClient.from_service_account_file + from_service_account_json = from_service_account_file + + @property + def transport(self) -> BigtableTransport: + """Return the transport used by the client instance. + + Returns: + BigtableTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial( + type(BigtableClient).get_transport_class, type(BigtableClient) + ) + + def __init__( + self, + *, + credentials: credentials.Credentials = None, + transport: Union[str, BigtableTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the bigtable client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.BigtableTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. 
+ """ + + self._client = BigtableClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + def read_rows( + self, + request: bigtable.ReadRowsRequest = None, + *, + table_name: str = None, + app_profile_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Awaitable[AsyncIterable[bigtable.ReadRowsResponse]]: + r"""Streams back the contents of all requested rows in + key order, optionally applying the same Reader filter to + each. Depending on their size, rows and cells may be + broken up across multiple responses, but atomicity of + each row will still be preserved. See the + ReadRowsResponse documentation for details. + + Args: + request (:class:`google.cloud.bigtable_v2.types.ReadRowsRequest`): + The request object. Request message for + Bigtable.ReadRows. + table_name (:class:`str`): + Required. The unique name of the table from which to + read. Values are of the form + ``projects//instances//tables/
``. + + This corresponds to the ``table_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (:class:`str`): + This value specifies routing for + replication. If not specified, the + "default" application profile will be + used. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + AsyncIterable[google.cloud.bigtable_v2.types.ReadRowsResponse]: + Response message for + Bigtable.ReadRows. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([table_name, app_profile_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable.ReadRowsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if table_name is not None: + request.table_name = table_name + if app_profile_id is not None: + request.app_profile_id = app_profile_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.read_rows, + default_retry=retries.Retry( + initial=0.01, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type(), + ), + default_timeout=43200.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def sample_row_keys( + self, + request: bigtable.SampleRowKeysRequest = None, + *, + table_name: str = None, + app_profile_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Awaitable[AsyncIterable[bigtable.SampleRowKeysResponse]]: + r"""Returns a sample of row keys in the table. The + returned row keys will delimit contiguous sections of + the table of approximately equal size, which can be used + to break up the data for distributed tasks like + mapreduces. + + Args: + request (:class:`google.cloud.bigtable_v2.types.SampleRowKeysRequest`): + The request object. Request message for + Bigtable.SampleRowKeys. + table_name (:class:`str`): + Required. The unique name of the table from which to + sample row keys. Values are of the form + ``projects//instances//tables/
``. + + This corresponds to the ``table_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (:class:`str`): + This value specifies routing for + replication. If not specified, the + "default" application profile will be + used. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + AsyncIterable[google.cloud.bigtable_v2.types.SampleRowKeysResponse]: + Response message for + Bigtable.SampleRowKeys. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([table_name, app_profile_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable.SampleRowKeysRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if table_name is not None: + request.table_name = table_name + if app_profile_id is not None: + request.app_profile_id = app_profile_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.sample_row_keys, + default_retry=retries.Retry( + initial=0.01, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type(), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def mutate_row( + self, + request: bigtable.MutateRowRequest = None, + *, + table_name: str = None, + row_key: bytes = None, + mutations: Sequence[data.Mutation] = None, + app_profile_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable.MutateRowResponse: + r"""Mutates a row atomically. Cells already present in the row are + left unchanged unless explicitly changed by ``mutation``. + + Args: + request (:class:`google.cloud.bigtable_v2.types.MutateRowRequest`): + The request object. Request message for + Bigtable.MutateRow. + table_name (:class:`str`): + Required. The unique name of the table to which the + mutation should be applied. Values are of the form + ``projects//instances//tables/
``. + + This corresponds to the ``table_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + row_key (:class:`bytes`): + Required. The key of the row to which + the mutation should be applied. + + This corresponds to the ``row_key`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + mutations (:class:`Sequence[google.cloud.bigtable_v2.types.Mutation]`): + Required. Changes to be atomically + applied to the specified row. Entries + are applied in order, meaning that + earlier mutations can be masked by later + ones. Must contain at least one entry + and at most 100000. + + This corresponds to the ``mutations`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (:class:`str`): + This value specifies routing for + replication. If not specified, the + "default" application profile will be + used. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_v2.types.MutateRowResponse: + Response message for + Bigtable.MutateRow. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([table_name, row_key, mutations, app_profile_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable.MutateRowRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if table_name is not None: + request.table_name = table_name + if row_key is not None: + request.row_key = row_key + if app_profile_id is not None: + request.app_profile_id = app_profile_id + + if mutations: + request.mutations.extend(mutations) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.mutate_row, + default_retry=retries.Retry( + initial=0.01, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def mutate_rows( + self, + request: bigtable.MutateRowsRequest = None, + *, + table_name: str = None, + entries: Sequence[bigtable.MutateRowsRequest.Entry] = None, + app_profile_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Awaitable[AsyncIterable[bigtable.MutateRowsResponse]]: + r"""Mutates multiple rows in a batch. 
Each individual row + is mutated atomically as in MutateRow, but the entire + batch is not executed atomically. + + Args: + request (:class:`google.cloud.bigtable_v2.types.MutateRowsRequest`): + The request object. Request message for + BigtableService.MutateRows. + table_name (:class:`str`): + Required. The unique name of the + table to which the mutations should be + applied. + + This corresponds to the ``table_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + entries (:class:`Sequence[google.cloud.bigtable_v2.types.MutateRowsRequest.Entry]`): + Required. The row keys and + corresponding mutations to be applied in + bulk. Each entry is applied as an atomic + mutation, but the entries may be applied + in arbitrary order (even between entries + for the same row). At least one entry + must be specified, and in total the + entries can contain at most 100000 + mutations. + + This corresponds to the ``entries`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (:class:`str`): + This value specifies routing for + replication. If not specified, the + "default" application profile will be + used. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + AsyncIterable[google.cloud.bigtable_v2.types.MutateRowsResponse]: + Response message for + BigtableService.MutateRows. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([table_name, entries, app_profile_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable.MutateRowsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if table_name is not None: + request.table_name = table_name + if app_profile_id is not None: + request.app_profile_id = app_profile_id + + if entries: + request.entries.extend(entries) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.mutate_rows, + default_retry=retries.Retry( + initial=0.01, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type(), + ), + default_timeout=600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
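An illustrative call to this method (``client`` and ``table`` as in the earlier sketch; like ``read_rows``, the stream is awaited once and then iterated)::

    from google.cloud.bigtable_v2.types import bigtable, data

    entry = bigtable.MutateRowsRequest.Entry(
        row_key=b"row-1",
        mutations=[
            data.Mutation(
                set_cell=data.Mutation.SetCell(
                    family_name="stats",
                    column_qualifier=b"visits",
                    timestamp_micros=-1,  # use Bigtable server time
                    value=b"\x00\x00\x00\x00\x00\x00\x00\x01",
                )
            )
        ],
    )

    stream = await client.mutate_rows(table_name=table, entries=[entry])
    async for response in stream:
        for result in response.entries:
            print(result.index, result.status.code)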
+ return response + + async def check_and_mutate_row( + self, + request: bigtable.CheckAndMutateRowRequest = None, + *, + table_name: str = None, + row_key: bytes = None, + predicate_filter: data.RowFilter = None, + true_mutations: Sequence[data.Mutation] = None, + false_mutations: Sequence[data.Mutation] = None, + app_profile_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable.CheckAndMutateRowResponse: + r"""Mutates a row atomically based on the output of a + predicate Reader filter. + + Args: + request (:class:`google.cloud.bigtable_v2.types.CheckAndMutateRowRequest`): + The request object. Request message for + Bigtable.CheckAndMutateRow. + table_name (:class:`str`): + Required. The unique name of the table to which the + conditional mutation should be applied. Values are of + the form + ``projects//instances//tables/
``. + + This corresponds to the ``table_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + row_key (:class:`bytes`): + Required. The key of the row to which + the conditional mutation should be + applied. + + This corresponds to the ``row_key`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + predicate_filter (:class:`google.cloud.bigtable_v2.types.RowFilter`): + The filter to be applied to the contents of the + specified row. Depending on whether or not any results + are yielded, either ``true_mutations`` or + ``false_mutations`` will be executed. If unset, checks + that the row contains any values at all. + + This corresponds to the ``predicate_filter`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + true_mutations (:class:`Sequence[google.cloud.bigtable_v2.types.Mutation]`): + Changes to be atomically applied to the specified row if + ``predicate_filter`` yields at least one cell when + applied to ``row_key``. Entries are applied in order, + meaning that earlier mutations can be masked by later + ones. Must contain at least one entry if + ``false_mutations`` is empty, and at most 100000. + + This corresponds to the ``true_mutations`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + false_mutations (:class:`Sequence[google.cloud.bigtable_v2.types.Mutation]`): + Changes to be atomically applied to the specified row if + ``predicate_filter`` does not yield any cells when + applied to ``row_key``. Entries are applied in order, + meaning that earlier mutations can be masked by later + ones. Must contain at least one entry if + ``true_mutations`` is empty, and at most 100000. + + This corresponds to the ``false_mutations`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (:class:`str`): + This value specifies routing for + replication. If not specified, the + "default" application profile will be + used. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_v2.types.CheckAndMutateRowResponse: + Response message for + Bigtable.CheckAndMutateRow. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any( + [ + table_name, + row_key, + predicate_filter, + true_mutations, + false_mutations, + app_profile_id, + ] + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable.CheckAndMutateRowRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
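For context, a sketch of how a caller might drive this conditional mutation (``client`` and ``table`` as in the earlier sketches; the predicate and mutations reuse the data messages documented earlier in this diff)::

    from google.cloud.bigtable_v2.types import data

    response = await client.check_and_mutate_row(
        table_name=table,
        row_key=b"row-1",
        predicate_filter=data.RowFilter(column_qualifier_regex_filter=b"flag"),
        # Applied only when the predicate yields at least one cell.
        true_mutations=[
            data.Mutation(delete_from_row=data.Mutation.DeleteFromRow())
        ],
    )
    print(response.predicate_matched)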
+ + if table_name is not None: + request.table_name = table_name + if row_key is not None: + request.row_key = row_key + if predicate_filter is not None: + request.predicate_filter = predicate_filter + if app_profile_id is not None: + request.app_profile_id = app_profile_id + + if true_mutations: + request.true_mutations.extend(true_mutations) + if false_mutations: + request.false_mutations.extend(false_mutations) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.check_and_mutate_row, + default_retry=retries.Retry( + initial=0.01, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type(), + ), + default_timeout=20.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def read_modify_write_row( + self, + request: bigtable.ReadModifyWriteRowRequest = None, + *, + table_name: str = None, + row_key: bytes = None, + rules: Sequence[data.ReadModifyWriteRule] = None, + app_profile_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable.ReadModifyWriteRowResponse: + r"""Modifies a row atomically on the server. The method + reads the latest existing timestamp and value from the + specified columns and writes a new entry based on pre- + defined read/modify/write rules. The new value for the + timestamp is the greater of the existing timestamp or + the current server time. The method returns the new + contents of all modified cells. + + Args: + request (:class:`google.cloud.bigtable_v2.types.ReadModifyWriteRowRequest`): + The request object. Request message for + Bigtable.ReadModifyWriteRow. + table_name (:class:`str`): + Required. The unique name of the table to which the + read/modify/write rules should be applied. Values are of + the form + ``projects//instances//tables/
``. + + This corresponds to the ``table_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + row_key (:class:`bytes`): + Required. The key of the row to which + the read/modify/write rules should be + applied. + + This corresponds to the ``row_key`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + rules (:class:`Sequence[google.cloud.bigtable_v2.types.ReadModifyWriteRule]`): + Required. Rules specifying how the + specified row's contents are to be + transformed into writes. Entries are + applied in order, meaning that earlier + rules will affect the results of later + ones. + + This corresponds to the ``rules`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (:class:`str`): + This value specifies routing for + replication. If not specified, the + "default" application profile will be + used. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_v2.types.ReadModifyWriteRowResponse: + Response message for + Bigtable.ReadModifyWriteRow. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([table_name, row_key, rules, app_profile_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable.ReadModifyWriteRowRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if table_name is not None: + request.table_name = table_name + if row_key is not None: + request.row_key = row_key + if app_profile_id is not None: + request.app_profile_id = app_profile_id + + if rules: + request.rules.extend(rules) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.read_modify_write_row, + default_retry=retries.Retry( + initial=0.01, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type(), + ), + default_timeout=20.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
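+        # (For reference when building the ``rules`` argument above: a rule
+        # that increments a 64-bit counter cell could be constructed as
+        #
+        #     data.ReadModifyWriteRule(
+        #         family_name="stats",       # hypothetical column family
+        #         column_qualifier=b"hits",  # hypothetical qualifier
+        #         increment_amount=1,
+        #     )
+        #
+        # with ``append_value`` being the other supported transformation; the
+        # returned response then carries the newly written cell contents.)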
+ return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-bigtable",).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("BigtableAsyncClient",) diff --git a/google/cloud/bigtable_v2/services/bigtable/client.py b/google/cloud/bigtable_v2/services/bigtable/client.py new file mode 100644 index 000000000..8ae811054 --- /dev/null +++ b/google/cloud/bigtable_v2/services/bigtable/client.py @@ -0,0 +1,1041 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Callable, Dict, Optional, Iterable, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.bigtable_v2.types import bigtable +from google.cloud.bigtable_v2.types import data + +from .transports.base import BigtableTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import BigtableGrpcTransport +from .transports.grpc_asyncio import BigtableGrpcAsyncIOTransport + + +class BigtableClientMeta(type): + """Metaclass for the Bigtable client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + + _transport_registry = OrderedDict() # type: Dict[str, Type[BigtableTransport]] + _transport_registry["grpc"] = BigtableGrpcTransport + _transport_registry["grpc_asyncio"] = BigtableGrpcAsyncIOTransport + + def get_transport_class(cls, label: str = None,) -> Type[BigtableTransport]: + """Return an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class BigtableClient(metaclass=BigtableClientMeta): + """Service for reading from and writing to existing Bigtable + tables. + """ + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Convert api endpoint to mTLS endpoint. 
+ Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "bigtable.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + BigtableClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + BigtableClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> BigtableTransport: + """Return the transport used by the client instance. + + Returns: + BigtableTransport: The transport used by the client instance. + """ + return self._transport + + @staticmethod + def table_path(project: str, instance: str, table: str,) -> str: + """Return a fully-qualified table string.""" + return "projects/{project}/instances/{instance}/tables/{table}".format( + project=project, instance=instance, table=table, + ) + + @staticmethod + def parse_table_path(path: str) -> Dict[str, str]: + """Parse a table path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/instances/(?P.+?)/tables/(?P
.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str,) -> str: + """Return a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str, str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str,) -> str: + """Return a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder,) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str, str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str,) -> str: + """Return a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization,) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str, str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str,) -> str: + """Return a fully-qualified project string.""" + return "projects/{project}".format(project=project,) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str, str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str,) -> str: + """Return a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format( + project=project, location=location, + ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str, str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__( + self, + *, + credentials: Optional[credentials.Credentials] = None, + transport: Union[str, BigtableTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the bigtable client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, BigtableTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). 
However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + use_client_cert = bool( + util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) + ) + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + client_cert_source_func = ( + mtls.default_client_cert_source() if is_mtls else None + ) + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + api_endpoint = ( + self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT + ) + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, BigtableTransport): + # transport is a BigtableTransport instance. + if credentials or client_options.credentials_file: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, " + "provide its scopes directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def read_rows( + self, + request: bigtable.ReadRowsRequest = None, + *, + table_name: str = None, + app_profile_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Iterable[bigtable.ReadRowsResponse]: + r"""Streams back the contents of all requested rows in + key order, optionally applying the same Reader filter to + each. 
Depending on their size, rows and cells may be + broken up across multiple responses, but atomicity of + each row will still be preserved. See the + ReadRowsResponse documentation for details. + + Args: + request (google.cloud.bigtable_v2.types.ReadRowsRequest): + The request object. Request message for + Bigtable.ReadRows. + table_name (str): + Required. The unique name of the table from which to + read. Values are of the form + ``projects//instances//tables/
``. + + This corresponds to the ``table_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (str): + This value specifies routing for + replication. If not specified, the + "default" application profile will be + used. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + Iterable[google.cloud.bigtable_v2.types.ReadRowsResponse]: + Response message for + Bigtable.ReadRows. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([table_name, app_profile_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable.ReadRowsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable.ReadRowsRequest): + request = bigtable.ReadRowsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if table_name is not None: + request.table_name = table_name + if app_profile_id is not None: + request.app_profile_id = app_profile_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.read_rows] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def sample_row_keys( + self, + request: bigtable.SampleRowKeysRequest = None, + *, + table_name: str = None, + app_profile_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Iterable[bigtable.SampleRowKeysResponse]: + r"""Returns a sample of row keys in the table. The + returned row keys will delimit contiguous sections of + the table of approximately equal size, which can be used + to break up the data for distributed tasks like + mapreduces. + + Args: + request (google.cloud.bigtable_v2.types.SampleRowKeysRequest): + The request object. Request message for + Bigtable.SampleRowKeys. + table_name (str): + Required. The unique name of the table from which to + sample row keys. Values are of the form + ``projects//instances//tables/
``. + + This corresponds to the ``table_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (str): + This value specifies routing for + replication. If not specified, the + "default" application profile will be + used. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + Iterable[google.cloud.bigtable_v2.types.SampleRowKeysResponse]: + Response message for + Bigtable.SampleRowKeys. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([table_name, app_profile_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable.SampleRowKeysRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable.SampleRowKeysRequest): + request = bigtable.SampleRowKeysRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if table_name is not None: + request.table_name = table_name + if app_profile_id is not None: + request.app_profile_id = app_profile_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.sample_row_keys] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def mutate_row( + self, + request: bigtable.MutateRowRequest = None, + *, + table_name: str = None, + row_key: bytes = None, + mutations: Sequence[data.Mutation] = None, + app_profile_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable.MutateRowResponse: + r"""Mutates a row atomically. Cells already present in the row are + left unchanged unless explicitly changed by ``mutation``. + + Args: + request (google.cloud.bigtable_v2.types.MutateRowRequest): + The request object. Request message for + Bigtable.MutateRow. + table_name (str): + Required. The unique name of the table to which the + mutation should be applied. Values are of the form + ``projects//instances//tables/
``. + + This corresponds to the ``table_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + row_key (bytes): + Required. The key of the row to which + the mutation should be applied. + + This corresponds to the ``row_key`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + mutations (Sequence[google.cloud.bigtable_v2.types.Mutation]): + Required. Changes to be atomically + applied to the specified row. Entries + are applied in order, meaning that + earlier mutations can be masked by later + ones. Must contain at least one entry + and at most 100000. + + This corresponds to the ``mutations`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (str): + This value specifies routing for + replication. If not specified, the + "default" application profile will be + used. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_v2.types.MutateRowResponse: + Response message for + Bigtable.MutateRow. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([table_name, row_key, mutations, app_profile_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable.MutateRowRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable.MutateRowRequest): + request = bigtable.MutateRowRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if table_name is not None: + request.table_name = table_name + if row_key is not None: + request.row_key = row_key + if app_profile_id is not None: + request.app_profile_id = app_profile_id + + if mutations: + request.mutations.extend(mutations) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.mutate_row] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def mutate_rows( + self, + request: bigtable.MutateRowsRequest = None, + *, + table_name: str = None, + entries: Sequence[bigtable.MutateRowsRequest.Entry] = None, + app_profile_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Iterable[bigtable.MutateRowsResponse]: + r"""Mutates multiple rows in a batch. 
Each individual row + is mutated atomically as in MutateRow, but the entire + batch is not executed atomically. + + Args: + request (google.cloud.bigtable_v2.types.MutateRowsRequest): + The request object. Request message for + BigtableService.MutateRows. + table_name (str): + Required. The unique name of the + table to which the mutations should be + applied. + + This corresponds to the ``table_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + entries (Sequence[google.cloud.bigtable_v2.types.MutateRowsRequest.Entry]): + Required. The row keys and + corresponding mutations to be applied in + bulk. Each entry is applied as an atomic + mutation, but the entries may be applied + in arbitrary order (even between entries + for the same row). At least one entry + must be specified, and in total the + entries can contain at most 100000 + mutations. + + This corresponds to the ``entries`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (str): + This value specifies routing for + replication. If not specified, the + "default" application profile will be + used. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + Iterable[google.cloud.bigtable_v2.types.MutateRowsResponse]: + Response message for + BigtableService.MutateRows. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([table_name, entries, app_profile_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable.MutateRowsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable.MutateRowsRequest): + request = bigtable.MutateRowsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if table_name is not None: + request.table_name = table_name + if app_profile_id is not None: + request.app_profile_id = app_profile_id + + if entries: + request.entries.extend(entries) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.mutate_rows] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
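+        # (This is a server-streaming RPC, so callers consume the returned
+        # iterable and inspect per-entry statuses; ``table_name`` and
+        # ``entries`` below are assumed to be built by the caller:
+        #
+        #     for batch in client.mutate_rows(table_name=table_name, entries=entries):
+        #         for entry in batch.entries:
+        #             if entry.status.code != 0:  # 0 is google.rpc OK
+        #                 print("row", entry.index, "failed:", entry.status.message)
+        # )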
+ return response + + def check_and_mutate_row( + self, + request: bigtable.CheckAndMutateRowRequest = None, + *, + table_name: str = None, + row_key: bytes = None, + predicate_filter: data.RowFilter = None, + true_mutations: Sequence[data.Mutation] = None, + false_mutations: Sequence[data.Mutation] = None, + app_profile_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable.CheckAndMutateRowResponse: + r"""Mutates a row atomically based on the output of a + predicate Reader filter. + + Args: + request (google.cloud.bigtable_v2.types.CheckAndMutateRowRequest): + The request object. Request message for + Bigtable.CheckAndMutateRow. + table_name (str): + Required. The unique name of the table to which the + conditional mutation should be applied. Values are of + the form + ``projects//instances//tables/
``. + + This corresponds to the ``table_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + row_key (bytes): + Required. The key of the row to which + the conditional mutation should be + applied. + + This corresponds to the ``row_key`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + predicate_filter (google.cloud.bigtable_v2.types.RowFilter): + The filter to be applied to the contents of the + specified row. Depending on whether or not any results + are yielded, either ``true_mutations`` or + ``false_mutations`` will be executed. If unset, checks + that the row contains any values at all. + + This corresponds to the ``predicate_filter`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + true_mutations (Sequence[google.cloud.bigtable_v2.types.Mutation]): + Changes to be atomically applied to the specified row if + ``predicate_filter`` yields at least one cell when + applied to ``row_key``. Entries are applied in order, + meaning that earlier mutations can be masked by later + ones. Must contain at least one entry if + ``false_mutations`` is empty, and at most 100000. + + This corresponds to the ``true_mutations`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + false_mutations (Sequence[google.cloud.bigtable_v2.types.Mutation]): + Changes to be atomically applied to the specified row if + ``predicate_filter`` does not yield any cells when + applied to ``row_key``. Entries are applied in order, + meaning that earlier mutations can be masked by later + ones. Must contain at least one entry if + ``true_mutations`` is empty, and at most 100000. + + This corresponds to the ``false_mutations`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (str): + This value specifies routing for + replication. If not specified, the + "default" application profile will be + used. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_v2.types.CheckAndMutateRowResponse: + Response message for + Bigtable.CheckAndMutateRow. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any( + [ + table_name, + row_key, + predicate_filter, + true_mutations, + false_mutations, + app_profile_id, + ] + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable.CheckAndMutateRowRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable.CheckAndMutateRowRequest): + request = bigtable.CheckAndMutateRowRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
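+        # A short sketch of branching on the outcome; ``predicate``,
+        # ``on_match``, and ``on_miss`` are hypothetical values built by the
+        # caller, while ``predicate_matched`` is the documented response field:
+        #
+        #     response = client.check_and_mutate_row(
+        #         table_name="projects/my-project/instances/my-instance/tables/my-table",
+        #         row_key=b"user#1234",
+        #         predicate_filter=predicate,
+        #         true_mutations=on_match,
+        #         false_mutations=on_miss,
+        #     )
+        #     if response.predicate_matched:
+        #         ...  # the ``true_mutations`` were applied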
+ + if table_name is not None: + request.table_name = table_name + if row_key is not None: + request.row_key = row_key + if predicate_filter is not None: + request.predicate_filter = predicate_filter + if app_profile_id is not None: + request.app_profile_id = app_profile_id + + if true_mutations: + request.true_mutations.extend(true_mutations) + if false_mutations: + request.false_mutations.extend(false_mutations) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.check_and_mutate_row] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def read_modify_write_row( + self, + request: bigtable.ReadModifyWriteRowRequest = None, + *, + table_name: str = None, + row_key: bytes = None, + rules: Sequence[data.ReadModifyWriteRule] = None, + app_profile_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable.ReadModifyWriteRowResponse: + r"""Modifies a row atomically on the server. The method + reads the latest existing timestamp and value from the + specified columns and writes a new entry based on pre- + defined read/modify/write rules. The new value for the + timestamp is the greater of the existing timestamp or + the current server time. The method returns the new + contents of all modified cells. + + Args: + request (google.cloud.bigtable_v2.types.ReadModifyWriteRowRequest): + The request object. Request message for + Bigtable.ReadModifyWriteRow. + table_name (str): + Required. The unique name of the table to which the + read/modify/write rules should be applied. Values are of + the form + ``projects//instances//tables/
``. + + This corresponds to the ``table_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + row_key (bytes): + Required. The key of the row to which + the read/modify/write rules should be + applied. + + This corresponds to the ``row_key`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + rules (Sequence[google.cloud.bigtable_v2.types.ReadModifyWriteRule]): + Required. Rules specifying how the + specified row's contents are to be + transformed into writes. Entries are + applied in order, meaning that earlier + rules will affect the results of later + ones. + + This corresponds to the ``rules`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (str): + This value specifies routing for + replication. If not specified, the + "default" application profile will be + used. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_v2.types.ReadModifyWriteRowResponse: + Response message for + Bigtable.ReadModifyWriteRow. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([table_name, row_key, rules, app_profile_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable.ReadModifyWriteRowRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable.ReadModifyWriteRowRequest): + request = bigtable.ReadModifyWriteRowRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if table_name is not None: + request.table_name = table_name + if row_key is not None: + request.row_key = row_key + if app_profile_id is not None: + request.app_profile_id = app_profile_id + + if rules: + request.rules.extend(rules) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.read_modify_write_row] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
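+        # (The response carries the post-write contents of the modified cells
+        # as a ``Row`` message; an illustrative traversal, using the field
+        # names from google.cloud.bigtable_v2.types.data:
+        #
+        #     for family in response.row.families:
+        #         for column in family.columns:
+        #             for cell in column.cells:
+        #                 print(family.name, column.qualifier, cell.value)
+        # )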
+ return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-bigtable",).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("BigtableClient",) diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py b/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py new file mode 100644 index 000000000..e18b45924 --- /dev/null +++ b/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py @@ -0,0 +1,35 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +from typing import Dict, Type + +from .base import BigtableTransport +from .grpc import BigtableGrpcTransport +from .grpc_asyncio import BigtableGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[BigtableTransport]] +_transport_registry["grpc"] = BigtableGrpcTransport +_transport_registry["grpc_asyncio"] = BigtableGrpcAsyncIOTransport + +__all__ = ( + "BigtableTransport", + "BigtableGrpcTransport", + "BigtableGrpcAsyncIOTransport", +) diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/base.py b/google/cloud/bigtable_v2/services/bigtable/transports/base.py new file mode 100644 index 000000000..8f3d81687 --- /dev/null +++ b/google/cloud/bigtable_v2/services/bigtable/transports/base.py @@ -0,0 +1,254 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+
+import abc
+import typing
+import pkg_resources
+
+from google import auth  # type: ignore
+from google.api_core import exceptions  # type: ignore
+from google.api_core import gapic_v1  # type: ignore
+from google.api_core import retry as retries  # type: ignore
+from google.auth import credentials  # type: ignore
+
+from google.cloud.bigtable_v2.types import bigtable
+
+
+try:
+    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+        gapic_version=pkg_resources.get_distribution("google-cloud-bigtable",).version,
+    )
+except pkg_resources.DistributionNotFound:
+    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
+class BigtableTransport(abc.ABC):
+    """Abstract transport class for Bigtable."""
+
+    AUTH_SCOPES = (
+        "https://www.googleapis.com/auth/bigtable.data",
+        "https://www.googleapis.com/auth/bigtable.data.readonly",
+        "https://www.googleapis.com/auth/cloud-bigtable.data",
+        "https://www.googleapis.com/auth/cloud-bigtable.data.readonly",
+        "https://www.googleapis.com/auth/cloud-platform",
+        "https://www.googleapis.com/auth/cloud-platform.read-only",
+    )
+
+    def __init__(
+        self,
+        *,
+        host: str = "bigtable.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: typing.Optional[str] = None,
+        scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES,
+        quota_project_id: typing.Optional[str] = None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+        **kwargs,
+    ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]): The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): A list of scopes.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+        """
+        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
+        if ":" not in host:
+            host += ":443"
+        self._host = host
+
+        # If no credentials are provided, then determine the appropriate
+        # defaults.
+        if credentials and credentials_file:
+            raise exceptions.DuplicateCredentialArgs(
+                "'credentials_file' and 'credentials' are mutually exclusive"
+            )
+
+        if credentials_file is not None:
+            credentials, _ = auth.load_credentials_from_file(
+                credentials_file, scopes=scopes, quota_project_id=quota_project_id
+            )
+
+        elif credentials is None:
+            credentials, _ = auth.default(
+                scopes=scopes, quota_project_id=quota_project_id
+            )
+
+        # Save the credentials.
+        self._credentials = credentials
+
+        # Lifted into its own function so it can be stubbed out during tests.
+        self._prep_wrapped_messages(client_info)
+
+    def _prep_wrapped_messages(self, client_info):
+        # Precompute the wrapped methods.
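+        # Each entry below pairs an RPC with generator-chosen defaults. Note
+        # that ``retries.if_exception_type()`` with no arguments yields a
+        # predicate matching nothing, so those methods are effectively never
+        # retried by default. As a sketch, wrapping one method by hand with a
+        # hypothetical retryable-error set would look like:
+        #
+        #     wrapped_read_rows = gapic_v1.method.wrap_method(
+        #         self.read_rows,
+        #         default_retry=retries.Retry(
+        #             initial=0.01,
+        #             maximum=60.0,
+        #             multiplier=2,
+        #             # assumption: treat UNAVAILABLE as transient
+        #             predicate=retries.if_exception_type(
+        #                 exceptions.ServiceUnavailable,
+        #             ),
+        #         ),
+        #         default_timeout=43200.0,
+        #         client_info=client_info,
+        #     )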
+ self._wrapped_methods = { + self.read_rows: gapic_v1.method.wrap_method( + self.read_rows, + default_retry=retries.Retry( + initial=0.01, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type(), + ), + default_timeout=43200.0, + client_info=client_info, + ), + self.sample_row_keys: gapic_v1.method.wrap_method( + self.sample_row_keys, + default_retry=retries.Retry( + initial=0.01, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type(), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.mutate_row: gapic_v1.method.wrap_method( + self.mutate_row, + default_retry=retries.Retry( + initial=0.01, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.mutate_rows: gapic_v1.method.wrap_method( + self.mutate_rows, + default_retry=retries.Retry( + initial=0.01, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type(), + ), + default_timeout=600.0, + client_info=client_info, + ), + self.check_and_mutate_row: gapic_v1.method.wrap_method( + self.check_and_mutate_row, + default_retry=retries.Retry( + initial=0.01, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type(), + ), + default_timeout=20.0, + client_info=client_info, + ), + self.read_modify_write_row: gapic_v1.method.wrap_method( + self.read_modify_write_row, + default_retry=retries.Retry( + initial=0.01, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type(), + ), + default_timeout=20.0, + client_info=client_info, + ), + } + + @property + def read_rows( + self, + ) -> typing.Callable[ + [bigtable.ReadRowsRequest], + typing.Union[ + bigtable.ReadRowsResponse, typing.Awaitable[bigtable.ReadRowsResponse] + ], + ]: + raise NotImplementedError() + + @property + def sample_row_keys( + self, + ) -> typing.Callable[ + [bigtable.SampleRowKeysRequest], + typing.Union[ + bigtable.SampleRowKeysResponse, + typing.Awaitable[bigtable.SampleRowKeysResponse], + ], + ]: + raise NotImplementedError() + + @property + def mutate_row( + self, + ) -> typing.Callable[ + [bigtable.MutateRowRequest], + typing.Union[ + bigtable.MutateRowResponse, typing.Awaitable[bigtable.MutateRowResponse] + ], + ]: + raise NotImplementedError() + + @property + def mutate_rows( + self, + ) -> typing.Callable[ + [bigtable.MutateRowsRequest], + typing.Union[ + bigtable.MutateRowsResponse, typing.Awaitable[bigtable.MutateRowsResponse] + ], + ]: + raise NotImplementedError() + + @property + def check_and_mutate_row( + self, + ) -> typing.Callable[ + [bigtable.CheckAndMutateRowRequest], + typing.Union[ + bigtable.CheckAndMutateRowResponse, + typing.Awaitable[bigtable.CheckAndMutateRowResponse], + ], + ]: + raise NotImplementedError() + + @property + def read_modify_write_row( + self, + ) -> typing.Callable[ + [bigtable.ReadModifyWriteRowRequest], + typing.Union[ + bigtable.ReadModifyWriteRowResponse, + typing.Awaitable[bigtable.ReadModifyWriteRowResponse], + ], + ]: + raise NotImplementedError() + + +__all__ = ("BigtableTransport",) diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py b/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py new file mode 100644 index 000000000..6b34e8ab0 --- /dev/null +++ b/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py @@ -0,0 +1,432 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you 
may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import warnings
+from typing import Callable, Dict, Optional, Sequence, Tuple
+
+from google.api_core import grpc_helpers  # type: ignore
+from google.api_core import gapic_v1  # type: ignore
+from google import auth  # type: ignore
+from google.auth import credentials  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+
+import grpc  # type: ignore
+
+from google.cloud.bigtable_v2.types import bigtable
+
+from .base import BigtableTransport, DEFAULT_CLIENT_INFO
+
+
+class BigtableGrpcTransport(BigtableTransport):
+    """gRPC backend transport for Bigtable.
+
+    Service for reading from and writing to existing Bigtable
+    tables.
+
+    This class defines the same methods as the primary client, so the
+    primary client can load the underlying transport implementation
+    and call it.
+
+    It sends protocol buffers over the wire using gRPC (which is built on
+    top of HTTP/2); the ``grpcio`` package must be installed.
+    """
+
+    _stubs: Dict[str, Callable]
+
+    def __init__(
+        self,
+        *,
+        host: str = "bigtable.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: str = None,
+        scopes: Sequence[str] = None,
+        channel: grpc.Channel = None,
+        api_mtls_endpoint: str = None,
+        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+        ssl_channel_credentials: grpc.ChannelCredentials = None,
+        client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+        quota_project_id: Optional[str] = None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+    ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]): The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+                ignored if ``channel`` is provided.
+            channel (Optional[grpc.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for the grpc channel. It is ignored if ``channel`` is provided.
+ client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._ssl_channel_credentials = ssl_channel_credentials + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Sanity check: Ensure that channel and credentials are not both + # provided. + credentials = False + + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + elif api_mtls_endpoint: + host = ( + api_mtls_endpoint + if ":" in api_mtls_endpoint + else api_mtls_endpoint + ":443" + ) + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + ssl_credentials = SslCredentials().ssl_credentials + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + self._ssl_channel_credentials = ssl_credentials + else: + host = host if ":" in host else host + ":443" + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=self._ssl_channel_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + self._stubs = {} # type: Dict[str, Callable] + + # Run the base constructor. 
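+        # (The base constructor resolves credentials, appends ":443" to a
+        # bare hostname, and precomputes the retry-wrapped method table; see
+        # transports/base.py. The ``grpc.max_*_message_length`` options of -1
+        # set above lift gRPC's default message-size caps (notably the 4 MiB
+        # receive limit), which large ReadRows and MutateRows payloads can
+        # exceed. A minimal construction relying on these defaults, assuming
+        # application default credentials are available, is simply:
+        #
+        #     transport = BigtableGrpcTransport(host="bigtable.googleapis.com")
+        # )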
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes or self.AUTH_SCOPES,
+            quota_project_id=quota_project_id,
+            client_info=client_info,
+        )
+
+    @classmethod
+    def create_channel(
+        cls,
+        host: str = "bigtable.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: str = None,
+        scopes: Optional[Sequence[str]] = None,
+        quota_project_id: Optional[str] = None,
+        **kwargs,
+    ) -> grpc.Channel:
+        """Create and return a gRPC channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            grpc.Channel: A gRPC channel object.
+
+        Raises:
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+        scopes = scopes or cls.AUTH_SCOPES
+        return grpc_helpers.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes,
+            quota_project_id=quota_project_id,
+            **kwargs,
+        )
+
+    @property
+    def grpc_channel(self) -> grpc.Channel:
+        """Return the channel designed to connect to this service.
+        """
+        return self._grpc_channel
+
+    @property
+    def read_rows(
+        self,
+    ) -> Callable[[bigtable.ReadRowsRequest], bigtable.ReadRowsResponse]:
+        r"""Return a callable for the read rows method over gRPC.
+
+        Streams back the contents of all requested rows in
+        key order, optionally applying the same Reader filter to
+        each. Depending on their size, rows and cells may be
+        broken up across multiple responses, but atomicity of
+        each row will still be preserved. See the
+        ReadRowsResponse documentation for details.
+
+        Returns:
+            Callable[[~.ReadRowsRequest],
+                    ~.ReadRowsResponse]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "read_rows" not in self._stubs:
+            self._stubs["read_rows"] = self.grpc_channel.unary_stream(
+                "/google.bigtable.v2.Bigtable/ReadRows",
+                request_serializer=bigtable.ReadRowsRequest.serialize,
+                response_deserializer=bigtable.ReadRowsResponse.deserialize,
+            )
+        return self._stubs["read_rows"]
+
+    @property
+    def sample_row_keys(
+        self,
+    ) -> Callable[[bigtable.SampleRowKeysRequest], bigtable.SampleRowKeysResponse]:
+        r"""Return a callable for the sample row keys method over gRPC.
+
+        Returns a sample of row keys in the table. The
+        returned row keys will delimit contiguous sections of
+        the table of approximately equal size, which can be used
+        to break up the data for distributed tasks like
+        mapreduces.
+ + Returns: + Callable[[~.SampleRowKeysRequest], + ~.SampleRowKeysResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "sample_row_keys" not in self._stubs: + self._stubs["sample_row_keys"] = self.grpc_channel.unary_stream( + "/google.bigtable.v2.Bigtable/SampleRowKeys", + request_serializer=bigtable.SampleRowKeysRequest.serialize, + response_deserializer=bigtable.SampleRowKeysResponse.deserialize, + ) + return self._stubs["sample_row_keys"] + + @property + def mutate_row( + self, + ) -> Callable[[bigtable.MutateRowRequest], bigtable.MutateRowResponse]: + r"""Return a callable for the mutate row method over gRPC. + + Mutates a row atomically. Cells already present in the row are + left unchanged unless explicitly changed by ``mutation``. + + Returns: + Callable[[~.MutateRowRequest], + ~.MutateRowResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "mutate_row" not in self._stubs: + self._stubs["mutate_row"] = self.grpc_channel.unary_unary( + "/google.bigtable.v2.Bigtable/MutateRow", + request_serializer=bigtable.MutateRowRequest.serialize, + response_deserializer=bigtable.MutateRowResponse.deserialize, + ) + return self._stubs["mutate_row"] + + @property + def mutate_rows( + self, + ) -> Callable[[bigtable.MutateRowsRequest], bigtable.MutateRowsResponse]: + r"""Return a callable for the mutate rows method over gRPC. + + Mutates multiple rows in a batch. Each individual row + is mutated atomically as in MutateRow, but the entire + batch is not executed atomically. + + Returns: + Callable[[~.MutateRowsRequest], + ~.MutateRowsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "mutate_rows" not in self._stubs: + self._stubs["mutate_rows"] = self.grpc_channel.unary_stream( + "/google.bigtable.v2.Bigtable/MutateRows", + request_serializer=bigtable.MutateRowsRequest.serialize, + response_deserializer=bigtable.MutateRowsResponse.deserialize, + ) + return self._stubs["mutate_rows"] + + @property + def check_and_mutate_row( + self, + ) -> Callable[ + [bigtable.CheckAndMutateRowRequest], bigtable.CheckAndMutateRowResponse + ]: + r"""Return a callable for the check and mutate row method over gRPC. + + Mutates a row atomically based on the output of a + predicate Reader filter. + + Returns: + Callable[[~.CheckAndMutateRowRequest], + ~.CheckAndMutateRowResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "check_and_mutate_row" not in self._stubs: + self._stubs["check_and_mutate_row"] = self.grpc_channel.unary_unary( + "/google.bigtable.v2.Bigtable/CheckAndMutateRow", + request_serializer=bigtable.CheckAndMutateRowRequest.serialize, + response_deserializer=bigtable.CheckAndMutateRowResponse.deserialize, + ) + return self._stubs["check_and_mutate_row"] + + @property + def read_modify_write_row( + self, + ) -> Callable[ + [bigtable.ReadModifyWriteRowRequest], bigtable.ReadModifyWriteRowResponse + ]: + r"""Return a callable for the read modify write row method over gRPC. + + Modifies a row atomically on the server. The method + reads the latest existing timestamp and value from the + specified columns and writes a new entry based on pre- + defined read/modify/write rules. The new value for the + timestamp is the greater of the existing timestamp or + the current server time. The method returns the new + contents of all modified cells. + + Returns: + Callable[[~.ReadModifyWriteRowRequest], + ~.ReadModifyWriteRowResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "read_modify_write_row" not in self._stubs: + self._stubs["read_modify_write_row"] = self.grpc_channel.unary_unary( + "/google.bigtable.v2.Bigtable/ReadModifyWriteRow", + request_serializer=bigtable.ReadModifyWriteRowRequest.serialize, + response_deserializer=bigtable.ReadModifyWriteRowResponse.deserialize, + ) + return self._stubs["read_modify_write_row"] + + +__all__ = ("BigtableGrpcTransport",) diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py b/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py new file mode 100644 index 000000000..aa7ff2ecc --- /dev/null +++ b/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py @@ -0,0 +1,440 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers_async # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.bigtable_v2.types import bigtable + +from .base import BigtableTransport, DEFAULT_CLIENT_INFO +from .grpc import BigtableGrpcTransport + + +class BigtableGrpcAsyncIOTransport(BigtableTransport): + """gRPC AsyncIO backend transport for Bigtable. + + Service for reading from and writing to existing Bigtable + tables. 
+
+    This class defines the same methods as the primary client, so the
+    primary client can load the underlying transport implementation
+    and call it.
+
+    It sends protocol buffers over the wire using gRPC (which is built on
+    top of HTTP/2); the ``grpcio`` package must be installed.
+    """
+
+    _grpc_channel: aio.Channel
+    _stubs: Dict[str, Callable] = {}
+
+    @classmethod
+    def create_channel(
+        cls,
+        host: str = "bigtable.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: Optional[str] = None,
+        scopes: Optional[Sequence[str]] = None,
+        quota_project_id: Optional[str] = None,
+        **kwargs,
+    ) -> aio.Channel:
+        """Create and return a gRPC AsyncIO channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            aio.Channel: A gRPC AsyncIO channel object.
+        """
+        scopes = scopes or cls.AUTH_SCOPES
+        return grpc_helpers_async.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes,
+            quota_project_id=quota_project_id,
+            **kwargs,
+        )
+
+    def __init__(
+        self,
+        *,
+        host: str = "bigtable.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: Optional[str] = None,
+        scopes: Optional[Sequence[str]] = None,
+        channel: aio.Channel = None,
+        api_mtls_endpoint: str = None,
+        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+        ssl_channel_credentials: grpc.ChannelCredentials = None,
+        client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+        quota_project_id=None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+    ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]): The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            channel (Optional[aio.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for grpc channel. It is ignored if ``channel`` is provided.
+            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                A callback to provide client certificate bytes and private key bytes,
+                both in PEM format. It is used to configure mutual TLS channel. It is
+                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+        self._ssl_channel_credentials = ssl_channel_credentials
+
+        if api_mtls_endpoint:
+            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+        if client_cert_source:
+            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+        if channel:
+            # Sanity check: Ensure that channel and credentials are not both
+            # provided.
+            credentials = False
+
+            # If a channel was explicitly provided, set it.
+            self._grpc_channel = channel
+            self._ssl_channel_credentials = None
+        elif api_mtls_endpoint:
+            host = (
+                api_mtls_endpoint
+                if ":" in api_mtls_endpoint
+                else api_mtls_endpoint + ":443"
+            )
+
+            if credentials is None:
+                credentials, _ = auth.default(
+                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+                )
+
+            # Create SSL credentials with client_cert_source or application
+            # default SSL credentials.
+            if client_cert_source:
+                cert, key = client_cert_source()
+                ssl_credentials = grpc.ssl_channel_credentials(
+                    certificate_chain=cert, private_key=key
+                )
+            else:
+                ssl_credentials = SslCredentials().ssl_credentials
+
+            # create a new channel. The provided one is ignored.
+            self._grpc_channel = type(self).create_channel(
+                host,
+                credentials=credentials,
+                credentials_file=credentials_file,
+                ssl_credentials=ssl_credentials,
+                scopes=scopes or self.AUTH_SCOPES,
+                quota_project_id=quota_project_id,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+            self._ssl_channel_credentials = ssl_credentials
+        else:
+            host = host if ":" in host else host + ":443"
+
+            if credentials is None:
+                credentials, _ = auth.default(
+                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+                )
+
+            if client_cert_source_for_mtls and not ssl_channel_credentials:
+                cert, key = client_cert_source_for_mtls()
+                self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                    certificate_chain=cert, private_key=key
+                )
+
+            # create a new channel. The provided one is ignored.
+ self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=self._ssl_channel_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Run the base constructor. + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + client_info=client_info, + ) + + self._stubs = {} + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def read_rows( + self, + ) -> Callable[[bigtable.ReadRowsRequest], Awaitable[bigtable.ReadRowsResponse]]: + r"""Return a callable for the read rows method over gRPC. + + Streams back the contents of all requested rows in + key order, optionally applying the same Reader filter to + each. Depending on their size, rows and cells may be + broken up across multiple responses, but atomicity of + each row will still be preserved. See the + ReadRowsResponse documentation for details. + + Returns: + Callable[[~.ReadRowsRequest], + Awaitable[~.ReadRowsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "read_rows" not in self._stubs: + self._stubs["read_rows"] = self.grpc_channel.unary_stream( + "/google.bigtable.v2.Bigtable/ReadRows", + request_serializer=bigtable.ReadRowsRequest.serialize, + response_deserializer=bigtable.ReadRowsResponse.deserialize, + ) + return self._stubs["read_rows"] + + @property + def sample_row_keys( + self, + ) -> Callable[ + [bigtable.SampleRowKeysRequest], Awaitable[bigtable.SampleRowKeysResponse] + ]: + r"""Return a callable for the sample row keys method over gRPC. + + Returns a sample of row keys in the table. The + returned row keys will delimit contiguous sections of + the table of approximately equal size, which can be used + to break up the data for distributed tasks like + mapreduces. + + Returns: + Callable[[~.SampleRowKeysRequest], + Awaitable[~.SampleRowKeysResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "sample_row_keys" not in self._stubs: + self._stubs["sample_row_keys"] = self.grpc_channel.unary_stream( + "/google.bigtable.v2.Bigtable/SampleRowKeys", + request_serializer=bigtable.SampleRowKeysRequest.serialize, + response_deserializer=bigtable.SampleRowKeysResponse.deserialize, + ) + return self._stubs["sample_row_keys"] + + @property + def mutate_row( + self, + ) -> Callable[[bigtable.MutateRowRequest], Awaitable[bigtable.MutateRowResponse]]: + r"""Return a callable for the mutate row method over gRPC. + + Mutates a row atomically. Cells already present in the row are + left unchanged unless explicitly changed by ``mutation``. 
+ + Returns: + Callable[[~.MutateRowRequest], + Awaitable[~.MutateRowResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "mutate_row" not in self._stubs: + self._stubs["mutate_row"] = self.grpc_channel.unary_unary( + "/google.bigtable.v2.Bigtable/MutateRow", + request_serializer=bigtable.MutateRowRequest.serialize, + response_deserializer=bigtable.MutateRowResponse.deserialize, + ) + return self._stubs["mutate_row"] + + @property + def mutate_rows( + self, + ) -> Callable[[bigtable.MutateRowsRequest], Awaitable[bigtable.MutateRowsResponse]]: + r"""Return a callable for the mutate rows method over gRPC. + + Mutates multiple rows in a batch. Each individual row + is mutated atomically as in MutateRow, but the entire + batch is not executed atomically. + + Returns: + Callable[[~.MutateRowsRequest], + Awaitable[~.MutateRowsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "mutate_rows" not in self._stubs: + self._stubs["mutate_rows"] = self.grpc_channel.unary_stream( + "/google.bigtable.v2.Bigtable/MutateRows", + request_serializer=bigtable.MutateRowsRequest.serialize, + response_deserializer=bigtable.MutateRowsResponse.deserialize, + ) + return self._stubs["mutate_rows"] + + @property + def check_and_mutate_row( + self, + ) -> Callable[ + [bigtable.CheckAndMutateRowRequest], + Awaitable[bigtable.CheckAndMutateRowResponse], + ]: + r"""Return a callable for the check and mutate row method over gRPC. + + Mutates a row atomically based on the output of a + predicate Reader filter. + + Returns: + Callable[[~.CheckAndMutateRowRequest], + Awaitable[~.CheckAndMutateRowResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "check_and_mutate_row" not in self._stubs: + self._stubs["check_and_mutate_row"] = self.grpc_channel.unary_unary( + "/google.bigtable.v2.Bigtable/CheckAndMutateRow", + request_serializer=bigtable.CheckAndMutateRowRequest.serialize, + response_deserializer=bigtable.CheckAndMutateRowResponse.deserialize, + ) + return self._stubs["check_and_mutate_row"] + + @property + def read_modify_write_row( + self, + ) -> Callable[ + [bigtable.ReadModifyWriteRowRequest], + Awaitable[bigtable.ReadModifyWriteRowResponse], + ]: + r"""Return a callable for the read modify write row method over gRPC. + + Modifies a row atomically on the server. The method + reads the latest existing timestamp and value from the + specified columns and writes a new entry based on pre- + defined read/modify/write rules. The new value for the + timestamp is the greater of the existing timestamp or + the current server time. The method returns the new + contents of all modified cells. + + Returns: + Callable[[~.ReadModifyWriteRowRequest], + Awaitable[~.ReadModifyWriteRowResponse]]: + A function that, when called, will call the underlying RPC + on the server. 
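
The per-RPC properties on these transports return bare callables that wrap the gRPC stubs; the generated clients are the intended entry point, but the transport can be driven directly. A minimal sketch (not part of this patch), assuming application default credentials are available at runtime and that the transports package re-exports the class as in other generated libraries; the table path is a placeholder:

```python
import asyncio

from google.cloud.bigtable_v2.services.bigtable.transports import (
    BigtableGrpcAsyncIOTransport,
)
from google.cloud.bigtable_v2.types import (
    ReadModifyWriteRowRequest,
    ReadModifyWriteRule,
)


async def append_bang(table_name: str) -> None:
    # No channel/credentials given: the constructor falls back to
    # google.auth.default() and the default endpoint.
    transport = BigtableGrpcAsyncIOTransport()
    request = ReadModifyWriteRowRequest(
        table_name=table_name,  # hypothetical table path
        row_key=b"user#1",
        rules=[
            ReadModifyWriteRule(
                family_name="cf1",
                column_qualifier=b"greeting",
                append_value=b"!",
            )
        ],
    )
    # Accessing the property builds the stub on first use and caches it.
    response = await transport.read_modify_write_row(request)
    print(response.row)


# asyncio.run(append_bang("projects/my-project/instances/my-instance/tables/my-table"))
```
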
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "read_modify_write_row" not in self._stubs: + self._stubs["read_modify_write_row"] = self.grpc_channel.unary_unary( + "/google.bigtable.v2.Bigtable/ReadModifyWriteRow", + request_serializer=bigtable.ReadModifyWriteRowRequest.serialize, + response_deserializer=bigtable.ReadModifyWriteRowResponse.deserialize, + ) + return self._stubs["read_modify_write_row"] + + +__all__ = ("BigtableGrpcAsyncIOTransport",) diff --git a/google/cloud/bigtable_v2/types.py b/google/cloud/bigtable_v2/types.py deleted file mode 100644 index 607e1b09c..000000000 --- a/google/cloud/bigtable_v2/types.py +++ /dev/null @@ -1,54 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from __future__ import absolute_import -import sys - -from google.api_core.protobuf_helpers import get_messages - -from google.cloud.bigtable_v2.proto import bigtable_pb2 -from google.cloud.bigtable_v2.proto import data_pb2 -from google.protobuf import any_pb2 -from google.protobuf import wrappers_pb2 -from google.rpc import status_pb2 - - -_shared_modules = [ - any_pb2, - wrappers_pb2, - status_pb2, -] - -_local_modules = [ - bigtable_pb2, - data_pb2, -] - -names = [] - -for module in _shared_modules: # pragma: NO COVER - for name, message in get_messages(module).items(): - setattr(sys.modules[__name__], name, message) - names.append(name) -for module in _local_modules: - for name, message in get_messages(module).items(): - message.__module__ = "google.cloud.bigtable_v2.types" - setattr(sys.modules[__name__], name, message) - names.append(name) - - -__all__ = tuple(sorted(names)) diff --git a/google/cloud/bigtable_v2/types/__init__.py b/google/cloud/bigtable_v2/types/__init__.py new file mode 100644 index 000000000..0aa74d208 --- /dev/null +++ b/google/cloud/bigtable_v2/types/__init__.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+
+from .data import (
+    Row,
+    Family,
+    Column,
+    Cell,
+    RowRange,
+    RowSet,
+    ColumnRange,
+    TimestampRange,
+    ValueRange,
+    RowFilter,
+    Mutation,
+    ReadModifyWriteRule,
+)
+from .bigtable import (
+    ReadRowsRequest,
+    ReadRowsResponse,
+    SampleRowKeysRequest,
+    SampleRowKeysResponse,
+    MutateRowRequest,
+    MutateRowResponse,
+    MutateRowsRequest,
+    MutateRowsResponse,
+    CheckAndMutateRowRequest,
+    CheckAndMutateRowResponse,
+    ReadModifyWriteRowRequest,
+    ReadModifyWriteRowResponse,
+)
+
+__all__ = (
+    "Row",
+    "Family",
+    "Column",
+    "Cell",
+    "RowRange",
+    "RowSet",
+    "ColumnRange",
+    "TimestampRange",
+    "ValueRange",
+    "RowFilter",
+    "Mutation",
+    "ReadModifyWriteRule",
+    "ReadRowsRequest",
+    "ReadRowsResponse",
+    "SampleRowKeysRequest",
+    "SampleRowKeysResponse",
+    "MutateRowRequest",
+    "MutateRowResponse",
+    "MutateRowsRequest",
+    "MutateRowsResponse",
+    "CheckAndMutateRowRequest",
+    "CheckAndMutateRowResponse",
+    "ReadModifyWriteRowRequest",
+    "ReadModifyWriteRowResponse",
+)
diff --git a/google/cloud/bigtable_v2/types/bigtable.py b/google/cloud/bigtable_v2/types/bigtable.py
new file mode 100644
index 000000000..83def634e
--- /dev/null
+++ b/google/cloud/bigtable_v2/types/bigtable.py
@@ -0,0 +1,463 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto  # type: ignore
+
+
+from google.cloud.bigtable_v2.types import data
+from google.protobuf import wrappers_pb2 as wrappers  # type: ignore
+from google.rpc import status_pb2 as gr_status  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package="google.bigtable.v2",
+    manifest={
+        "ReadRowsRequest",
+        "ReadRowsResponse",
+        "SampleRowKeysRequest",
+        "SampleRowKeysResponse",
+        "MutateRowRequest",
+        "MutateRowResponse",
+        "MutateRowsRequest",
+        "MutateRowsResponse",
+        "CheckAndMutateRowRequest",
+        "CheckAndMutateRowResponse",
+        "ReadModifyWriteRowRequest",
+        "ReadModifyWriteRowResponse",
+    },
+)
+
+
+class ReadRowsRequest(proto.Message):
+    r"""Request message for Bigtable.ReadRows.
+
+    Attributes:
+        table_name (str):
+            Required. The unique name of the table from which to read.
+            Values are of the form
+            ``projects/<project>/instances/<instance>/tables/<table>``.
+        app_profile_id (str):
+            This value specifies routing for replication.
+            If not specified, the "default" application
+            profile will be used.
+        rows (google.cloud.bigtable_v2.types.RowSet):
+            The row keys and/or ranges to read. If not
+            specified, reads from all rows.
+        filter (google.cloud.bigtable_v2.types.RowFilter):
+            The filter to apply to the contents of the
+            specified row(s). If unset, reads the entirety
+            of each row.
+        rows_limit (int):
+            The read will terminate after committing to N
+            rows' worth of results. The default (zero) is to
+            return all results.
+    """
+
+    table_name = proto.Field(proto.STRING, number=1)
+
+    app_profile_id = proto.Field(proto.STRING, number=5)
+
+    rows = proto.Field(proto.MESSAGE, number=2, message=data.RowSet,)
+
+    filter = proto.Field(proto.MESSAGE, number=3, message=data.RowFilter,)
+
+    rows_limit = proto.Field(proto.INT64, number=4)
+
+
+class ReadRowsResponse(proto.Message):
+    r"""Response message for Bigtable.ReadRows.
+
+    Attributes:
+        chunks (Sequence[google.cloud.bigtable_v2.types.ReadRowsResponse.CellChunk]):
+            A collection of a row's contents as part of
+            the read request.
+        last_scanned_row_key (bytes):
+            Optionally the server might return the row
+            key of the last row it has scanned. The client
+            can use this to construct a more efficient retry
+            request if needed: any row keys or portions of
+            ranges less than this row key can be dropped
+            from the request. This is primarily useful for
+            cases where the server has read a lot of data
+            that was filtered out since the last committed
+            row key, allowing the client to skip that work
+            on a retry.
+    """
+
+    class CellChunk(proto.Message):
+        r"""Specifies a piece of a row's contents returned as part of the
+        read response stream.
+
+        Attributes:
+            row_key (bytes):
+                The row key for this chunk of data. If the
+                row key is empty, this CellChunk is a
+                continuation of the same row as the previous
+                CellChunk in the response stream, even if that
+                CellChunk was in a previous ReadRowsResponse
+                message.
+            family_name (google.protobuf.wrappers_pb2.StringValue):
+                The column family name for this chunk of data. If this
+                message is not present this CellChunk is a continuation of
+                the same column family as the previous CellChunk. The empty
+                string can occur as a column family name in a response so
+                clients must check explicitly for the presence of this
+                message, not just for ``family_name.value`` being non-empty.
+            qualifier (google.protobuf.wrappers_pb2.BytesValue):
+                The column qualifier for this chunk of data. If this message
+                is not present, this CellChunk is a continuation of the same
+                column as the previous CellChunk. Column qualifiers may be
+                empty so clients must check for the presence of this
+                message, not just for ``qualifier.value`` being non-empty.
+            timestamp_micros (int):
+                The cell's stored timestamp, which also uniquely identifies
+                it within its column. Values are always expressed in
+                microseconds, but individual tables may set a coarser
+                granularity to further restrict the allowed values. For
+                example, a table which specifies millisecond granularity
+                will only allow values of ``timestamp_micros`` which are
+                multiples of 1000. Timestamps are only set in the first
+                CellChunk per cell (for cells split into multiple chunks).
+            labels (Sequence[str]):
+                Labels applied to the cell by a
+                [RowFilter][google.bigtable.v2.RowFilter]. Labels are only
+                set on the first CellChunk per cell.
+            value (bytes):
+                The value stored in the cell. Cell values
+                can be split across multiple CellChunks. In
+                that case only the value field will be set in
+                CellChunks after the first: the timestamp and
+                labels will only be present in the first
+                CellChunk, even if the first CellChunk came in a
+                previous ReadRowsResponse.
+            value_size (int):
+                If this CellChunk is part of a chunked cell value and this
+                is not the final chunk of that cell, value_size will be set
+                to the total length of the cell value. The client can use
+                this size to pre-allocate memory to hold the full cell
+                value.
+            reset_row (bool):
+                Indicates that the client should drop all previous chunks
+                for ``row_key``, as it will be re-read from the beginning.
+            commit_row (bool):
+                Indicates that the client can safely process all previous
+                chunks for ``row_key``, as its data has been fully read.
+        """
+
+        row_key = proto.Field(proto.BYTES, number=1)
+
+        family_name = proto.Field(
+            proto.MESSAGE, number=2, message=wrappers.StringValue,
+        )
+
+        qualifier = proto.Field(proto.MESSAGE, number=3, message=wrappers.BytesValue,)
+
+        timestamp_micros = proto.Field(proto.INT64, number=4)
+
+        labels = proto.RepeatedField(proto.STRING, number=5)
+
+        value = proto.Field(proto.BYTES, number=6)
+
+        value_size = proto.Field(proto.INT32, number=7)
+
+        reset_row = proto.Field(proto.BOOL, number=8, oneof="row_status")
+
+        commit_row = proto.Field(proto.BOOL, number=9, oneof="row_status")
+
+    chunks = proto.RepeatedField(proto.MESSAGE, number=1, message=CellChunk,)
+
+    last_scanned_row_key = proto.Field(proto.BYTES, number=2)
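
The CellChunk contract above leaves reassembly to the client: a missing row key, family, or qualifier continues the previous cell's coordinates, a nonzero ``value_size`` signals that more value chunks follow, ``reset_row`` discards the partial row, and ``commit_row`` finalizes it. A simplified sketch of that state machine (not the library's actual reader; it assumes the wrapper fields surface as optional primitives):

```python
def merge_read_rows_chunks(chunks):
    """Yield (row_key, {(family, qualifier, timestamp): value}) per committed row."""
    row_key, family, qualifier = None, None, None
    cells, buf, coord = {}, b"", None

    for c in chunks:
        if c.reset_row:
            # Drop everything buffered for this row; it will be re-sent.
            cells, buf, coord = {}, b"", None
            continue
        if c.row_key:
            row_key = c.row_key
        # Absent family/qualifier means "same as the previous chunk".
        if c.family_name is not None:
            family = c.family_name
        if c.qualifier is not None:
            qualifier = c.qualifier
        if coord is None:
            # Timestamp is only present on the first chunk of a cell.
            coord = (family, qualifier, c.timestamp_micros)
        buf += c.value
        if not c.value_size:
            # Zero value_size marks the final (or only) chunk of the cell.
            cells[coord] = buf
            buf, coord = b"", None
        if c.commit_row:
            yield row_key, cells
            cells = {}
```
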
+
+
+class SampleRowKeysRequest(proto.Message):
+    r"""Request message for Bigtable.SampleRowKeys.
+
+    Attributes:
+        table_name (str):
+            Required. The unique name of the table from which to sample
+            row keys. Values are of the form
+            ``projects/<project>/instances/<instance>/tables/<table>``.
+        app_profile_id (str):
+            This value specifies routing for replication.
+            If not specified, the "default" application
+            profile will be used.
+    """
+
+    table_name = proto.Field(proto.STRING, number=1)
+
+    app_profile_id = proto.Field(proto.STRING, number=2)
+
+
+class SampleRowKeysResponse(proto.Message):
+    r"""Response message for Bigtable.SampleRowKeys.
+
+    Attributes:
+        row_key (bytes):
+            Sorted streamed sequence of sample row keys
+            in the table. The table might have contents
+            before the first row key in the list and after
+            the last one, but a key containing the empty
+            string indicates "end of table" and will be the
+            last response given, if present.
+            Note that row keys in this list may not have
+            ever been written to or read from, and users
+            should therefore not make any assumptions about
+            the row key structure that are specific to their
+            use case.
+        offset_bytes (int):
+            Approximate total storage space used by all rows in the
+            table which precede ``row_key``. Buffering the contents of
+            all rows between two subsequent samples would require space
+            roughly equal to the difference in their ``offset_bytes``
+            fields.
+    """
+
+    row_key = proto.Field(proto.BYTES, number=1)
+
+    offset_bytes = proto.Field(proto.INT64, number=2)
+
+
+class MutateRowRequest(proto.Message):
+    r"""Request message for Bigtable.MutateRow.
+
+    Attributes:
+        table_name (str):
+            Required. The unique name of the table to which the mutation
+            should be applied. Values are of the form
+            ``projects/<project>/instances/<instance>/tables/<table>``.
+        app_profile_id (str):
+            This value specifies routing for replication.
+            If not specified, the "default" application
+            profile will be used.
+        row_key (bytes):
+            Required. The key of the row to which the
+            mutation should be applied.
+        mutations (Sequence[google.cloud.bigtable_v2.types.Mutation]):
+            Required. Changes to be atomically applied to
+            the specified row. Entries are applied in order,
+            meaning that earlier mutations can be masked by
+            later ones. Must contain at least one entry and
+            at most 100000.
+    """
+
+    table_name = proto.Field(proto.STRING, number=1)
+
+    app_profile_id = proto.Field(proto.STRING, number=4)
+
+    row_key = proto.Field(proto.BYTES, number=2)
+
+    mutations = proto.RepeatedField(proto.MESSAGE, number=3, message=data.Mutation,)
+
+
+class MutateRowResponse(proto.Message):
+    r"""Response message for Bigtable.MutateRow."""
+
+
+class MutateRowsRequest(proto.Message):
+    r"""Request message for BigtableService.MutateRows.
+
+    Attributes:
+        table_name (str):
+            Required. The unique name of the table to
+            which the mutations should be applied.
+        app_profile_id (str):
+            This value specifies routing for replication.
+            If not specified, the "default" application
+            profile will be used.
+        entries (Sequence[google.cloud.bigtable_v2.types.MutateRowsRequest.Entry]):
+            Required. The row keys and corresponding
+            mutations to be applied in bulk. Each entry is
+            applied as an atomic mutation, but the entries
+            may be applied in arbitrary order (even between
+            entries for the same row). At least one entry
+            must be specified, and in total the entries can
+            contain at most 100000 mutations.
+    """
+
+    class Entry(proto.Message):
+        r"""A mutation for a given row.
+
+        Attributes:
+            row_key (bytes):
+                The key of the row to which the ``mutations`` should be
+                applied.
+            mutations (Sequence[google.cloud.bigtable_v2.types.Mutation]):
+                Required. Changes to be atomically applied to
+                the specified row. Mutations are applied in
+                order, meaning that earlier mutations can be
+                masked by later ones.
+                You must specify at least one mutation.
+        """
+
+        row_key = proto.Field(proto.BYTES, number=1)
+
+        mutations = proto.RepeatedField(proto.MESSAGE, number=2, message=data.Mutation,)
+
+    table_name = proto.Field(proto.STRING, number=1)
+
+    app_profile_id = proto.Field(proto.STRING, number=3)
+
+    entries = proto.RepeatedField(proto.MESSAGE, number=2, message=Entry,)
+
+
+class MutateRowsResponse(proto.Message):
+    r"""Response message for BigtableService.MutateRows.
+
+    Attributes:
+        entries (Sequence[google.cloud.bigtable_v2.types.MutateRowsResponse.Entry]):
+            One or more results for Entries from the
+            batch request.
+    """
+
+    class Entry(proto.Message):
+        r"""The result of applying a passed mutation in the original
+        request.
+
+        Attributes:
+            index (int):
+                The index into the original request's ``entries`` list of
+                the Entry for which a result is being reported.
+            status (google.rpc.status_pb2.Status):
+                The result of the request Entry identified by ``index``.
+                Depending on how requests are batched during execution, it
+                is possible for one Entry to fail due to an error with
+                another Entry. In the event that this occurs, the same error
+                will be reported for both entries.
+        """
+
+        index = proto.Field(proto.INT64, number=1)
+
+        status = proto.Field(proto.MESSAGE, number=2, message=gr_status.Status,)
+
+    entries = proto.RepeatedField(proto.MESSAGE, number=1, message=Entry,)
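
Each ``MutateRowsResponse.Entry`` above reports a ``google.rpc.Status`` keyed by the index of an entry in the original request, and MutateRows streams its responses, so a caller has to fold the whole stream to learn which entries need a retry. A hedged sketch (not library code):

```python
def failed_entry_indexes(response_stream):
    """Return the indexes of request entries whose reported status was not OK."""
    failed = []
    for response in response_stream:  # MutateRows is a server-streaming RPC
        for entry in response.entries:
            if entry.status.code != 0:  # 0 is google.rpc.Code.OK
                failed.append(entry.index)
    return failed
```
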
+
+
+class CheckAndMutateRowRequest(proto.Message):
+    r"""Request message for Bigtable.CheckAndMutateRow.
+
+    Attributes:
+        table_name (str):
+            Required. The unique name of the table to which the
+            conditional mutation should be applied. Values are of the
+            form
+            ``projects/<project>/instances/<instance>/tables/<table>``.
+        app_profile_id (str):
+            This value specifies routing for replication.
+            If not specified, the "default" application
+            profile will be used.
+        row_key (bytes):
+            Required. The key of the row to which the
+            conditional mutation should be applied.
+        predicate_filter (google.cloud.bigtable_v2.types.RowFilter):
+            The filter to be applied to the contents of the specified
+            row. Depending on whether or not any results are yielded,
+            either ``true_mutations`` or ``false_mutations`` will be
+            executed. If unset, checks that the row contains any values
+            at all.
+        true_mutations (Sequence[google.cloud.bigtable_v2.types.Mutation]):
+            Changes to be atomically applied to the specified row if
+            ``predicate_filter`` yields at least one cell when applied
+            to ``row_key``. Entries are applied in order, meaning that
+            earlier mutations can be masked by later ones. Must contain
+            at least one entry if ``false_mutations`` is empty, and at
+            most 100000.
+        false_mutations (Sequence[google.cloud.bigtable_v2.types.Mutation]):
+            Changes to be atomically applied to the specified row if
+            ``predicate_filter`` does not yield any cells when applied
+            to ``row_key``. Entries are applied in order, meaning that
+            earlier mutations can be masked by later ones. Must contain
+            at least one entry if ``true_mutations`` is empty, and at
+            most 100000.
+    """
+
+    table_name = proto.Field(proto.STRING, number=1)
+
+    app_profile_id = proto.Field(proto.STRING, number=7)
+
+    row_key = proto.Field(proto.BYTES, number=2)
+
+    predicate_filter = proto.Field(proto.MESSAGE, number=6, message=data.RowFilter,)
+
+    true_mutations = proto.RepeatedField(
+        proto.MESSAGE, number=4, message=data.Mutation,
+    )
+
+    false_mutations = proto.RepeatedField(
+        proto.MESSAGE, number=5, message=data.Mutation,
+    )
+
+
+class CheckAndMutateRowResponse(proto.Message):
+    r"""Response message for Bigtable.CheckAndMutateRow.
+
+    Attributes:
+        predicate_matched (bool):
+            Whether or not the request's ``predicate_filter`` yielded
+            any results for the specified row.
+    """
+
+    predicate_matched = proto.Field(proto.BOOL, number=1)
+
+
+class ReadModifyWriteRowRequest(proto.Message):
+    r"""Request message for Bigtable.ReadModifyWriteRow.
+
+    Attributes:
+        table_name (str):
+            Required. The unique name of the table to which the
+            read/modify/write rules should be applied. Values are of the
+            form
+            ``projects/<project>/instances/<instance>/tables/<table>``.
+        app_profile_id (str):
+            This value specifies routing for replication.
+            If not specified, the "default" application
+            profile will be used.
+        row_key (bytes):
+            Required. The key of the row to which the
+            read/modify/write rules should be applied.
+        rules (Sequence[google.cloud.bigtable_v2.types.ReadModifyWriteRule]):
+            Required. Rules specifying how the specified
+            row's contents are to be transformed into
+            writes. Entries are applied in order, meaning
+            that earlier rules will affect the results of
+            later ones.
+    """
+
+    table_name = proto.Field(proto.STRING, number=1)
+
+    app_profile_id = proto.Field(proto.STRING, number=4)
+
+    row_key = proto.Field(proto.BYTES, number=2)
+
+    rules = proto.RepeatedField(
+        proto.MESSAGE, number=3, message=data.ReadModifyWriteRule,
+    )
+
+
+class ReadModifyWriteRowResponse(proto.Message):
+    r"""Response message for Bigtable.ReadModifyWriteRow.
+
+    Attributes:
+        row (google.cloud.bigtable_v2.types.Row):
+            A Row containing the new contents of all
+            cells modified by the request.
+    """
+
+    row = proto.Field(proto.MESSAGE, number=1, message=data.Row,)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/bigtable_v2/types/data.py b/google/cloud/bigtable_v2/types/data.py
new file mode 100644
index 000000000..eece89c5a
--- /dev/null
+++ b/google/cloud/bigtable_v2/types/data.py
@@ -0,0 +1,728 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package="google.bigtable.v2",
+    manifest={
+        "Row",
+        "Family",
+        "Column",
+        "Cell",
+        "RowRange",
+        "RowSet",
+        "ColumnRange",
+        "TimestampRange",
+        "ValueRange",
+        "RowFilter",
+        "Mutation",
+        "ReadModifyWriteRule",
+    },
+)
+
+
+class Row(proto.Message):
+    r"""Specifies the complete (requested) contents of a single row
+    of a table. Rows which exceed 256MiB in size cannot be read in
+    full.
+
+    Attributes:
+        key (bytes):
+            The unique key which identifies this row
+            within its table. This is the same key that's
+            used to identify the row in, for example, a
+            MutateRowRequest. May contain any non-empty byte
+            string up to 4KiB in length.
+        families (Sequence[google.cloud.bigtable_v2.types.Family]):
+            May be empty, but only if the entire row is
+            empty. The mutual ordering of column families is
+            not specified.
+    """
+
+    key = proto.Field(proto.BYTES, number=1)
+
+    families = proto.RepeatedField(proto.MESSAGE, number=2, message="Family",)
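
``Row`` is the root of the Family/Column/Cell hierarchy defined next; a small sketch (not part of the patch) flattening a returned row into plain tuples:

```python
def iter_cells(row):
    """Yield (family, qualifier, timestamp_micros, value) for every cell in a Row."""
    for family in row.families:
        for column in family.columns:
            for cell in column.cells:
                yield family.name, column.qualifier, cell.timestamp_micros, cell.value
```
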
+
+
+class Family(proto.Message):
+    r"""Specifies (some of) the contents of a single row/column
+    family intersection of a table.
+
+    Attributes:
+        name (str):
+            The unique key which identifies this family within its row.
+            This is the same key that's used to identify the family in,
+            for example, a RowFilter which sets its
+            "family_name_regex_filter" field. Must match
+            ``[-_.a-zA-Z0-9]+``, except that AggregatingRowProcessors
+            may produce cells in a sentinel family with an empty name.
+            Must be no greater than 64 characters in length.
+        columns (Sequence[google.cloud.bigtable_v2.types.Column]):
+            Must not be empty. Sorted in order of
+            increasing "qualifier".
+    """
+
+    name = proto.Field(proto.STRING, number=1)
+
+    columns = proto.RepeatedField(proto.MESSAGE, number=2, message="Column",)
+
+
+class Column(proto.Message):
+    r"""Specifies (some of) the contents of a single row/column
+    intersection of a table.
+
+    Attributes:
+        qualifier (bytes):
+            The unique key which identifies this column within its
+            family. This is the same key that's used to identify the
+            column in, for example, a RowFilter which sets its
+            ``column_qualifier_regex_filter`` field. May contain any
+            byte string, including the empty string, up to 16kiB in
+            length.
+        cells (Sequence[google.cloud.bigtable_v2.types.Cell]):
+            Must not be empty. Sorted in order of decreasing
+            "timestamp_micros".
+    """
+
+    qualifier = proto.Field(proto.BYTES, number=1)
+
+    cells = proto.RepeatedField(proto.MESSAGE, number=2, message="Cell",)
+
+
+class Cell(proto.Message):
+    r"""Specifies (some of) the contents of a single
+    row/column/timestamp of a table.
+
+    Attributes:
+        timestamp_micros (int):
+            The cell's stored timestamp, which also uniquely identifies
+            it within its column. Values are always expressed in
+            microseconds, but individual tables may set a coarser
+            granularity to further restrict the allowed values. For
+            example, a table which specifies millisecond granularity
+            will only allow values of ``timestamp_micros`` which are
+            multiples of 1000.
+        value (bytes):
+            The value stored in the cell.
+            May contain any byte string, including the empty
+            string, up to 100MiB in length.
+        labels (Sequence[str]):
+            Labels applied to the cell by a
+            [RowFilter][google.bigtable.v2.RowFilter].
+    """
+
+    timestamp_micros = proto.Field(proto.INT64, number=1)
+
+    value = proto.Field(proto.BYTES, number=2)
+
+    labels = proto.RepeatedField(proto.STRING, number=3)
+
+
+class RowRange(proto.Message):
+    r"""Specifies a contiguous range of rows.
+
+    Attributes:
+        start_key_closed (bytes):
+            Used when giving an inclusive lower bound for
+            the range.
+        start_key_open (bytes):
+            Used when giving an exclusive lower bound for
+            the range.
+        end_key_open (bytes):
+            Used when giving an exclusive upper bound for
+            the range.
+        end_key_closed (bytes):
+            Used when giving an inclusive upper bound for
+            the range.
+    """
+
+    start_key_closed = proto.Field(proto.BYTES, number=1, oneof="start_key")
+
+    start_key_open = proto.Field(proto.BYTES, number=2, oneof="start_key")
+
+    end_key_open = proto.Field(proto.BYTES, number=3, oneof="end_key")
+
+    end_key_closed = proto.Field(proto.BYTES, number=4, oneof="end_key")
+
+
+class RowSet(proto.Message):
+    r"""Specifies a non-contiguous set of rows.
+
+    Attributes:
+        row_keys (Sequence[bytes]):
+            Single rows included in the set.
+        row_ranges (Sequence[google.cloud.bigtable_v2.types.RowRange]):
+            Contiguous row ranges included in the set.
+    """
+
+    row_keys = proto.RepeatedField(proto.BYTES, number=1)
+
+    row_ranges = proto.RepeatedField(proto.MESSAGE, number=2, message="RowRange",)
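
``RowSet`` and ``RowRange`` above combine explicit keys with key ranges; the ``*_closed``/``*_open`` fields are oneof pairs, so each range sets at most one lower and one upper bound. A brief construction sketch for a ``ReadRowsRequest`` (the table path is hypothetical):

```python
from google.cloud.bigtable_v2.types import ReadRowsRequest, RowRange, RowSet

rows = RowSet(
    row_keys=[b"user#42"],  # one explicit key
    row_ranges=[
        # Inclusive at the start, exclusive at the end.
        RowRange(start_key_closed=b"user#100", end_key_open=b"user#200")
    ],
)
request = ReadRowsRequest(
    table_name="projects/my-project/instances/my-instance/tables/my-table",
    rows=rows,
    rows_limit=100,  # stop after 100 rows' worth of results
)
```
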
+
+
+class ColumnRange(proto.Message):
+    r"""Specifies a contiguous range of columns within a single column
+    family. The range spans from ``<column_family>:<start_qualifier>``
+    to ``<column_family>:<end_qualifier>``, where both bounds can be
+    either inclusive or exclusive.
+
+    Attributes:
+        family_name (str):
+            The name of the column family within which
+            this range falls.
+        start_qualifier_closed (bytes):
+            Used when giving an inclusive lower bound for
+            the range.
+        start_qualifier_open (bytes):
+            Used when giving an exclusive lower bound for
+            the range.
+        end_qualifier_closed (bytes):
+            Used when giving an inclusive upper bound for
+            the range.
+        end_qualifier_open (bytes):
+            Used when giving an exclusive upper bound for
+            the range.
+    """
+
+    family_name = proto.Field(proto.STRING, number=1)
+
+    start_qualifier_closed = proto.Field(proto.BYTES, number=2, oneof="start_qualifier")
+
+    start_qualifier_open = proto.Field(proto.BYTES, number=3, oneof="start_qualifier")
+
+    end_qualifier_closed = proto.Field(proto.BYTES, number=4, oneof="end_qualifier")
+
+    end_qualifier_open = proto.Field(proto.BYTES, number=5, oneof="end_qualifier")
+
+
+class TimestampRange(proto.Message):
+    r"""Specifies a contiguous range of microsecond timestamps.
+
+    Attributes:
+        start_timestamp_micros (int):
+            Inclusive lower bound. If left empty,
+            interpreted as 0.
+        end_timestamp_micros (int):
+            Exclusive upper bound. If left empty,
+            interpreted as infinity.
+    """
+
+    start_timestamp_micros = proto.Field(proto.INT64, number=1)
+
+    end_timestamp_micros = proto.Field(proto.INT64, number=2)
+
+
+class ValueRange(proto.Message):
+    r"""Specifies a contiguous range of raw byte values.
+
+    Attributes:
+        start_value_closed (bytes):
+            Used when giving an inclusive lower bound for
+            the range.
+        start_value_open (bytes):
+            Used when giving an exclusive lower bound for
+            the range.
+        end_value_closed (bytes):
+            Used when giving an inclusive upper bound for
+            the range.
+        end_value_open (bytes):
+            Used when giving an exclusive upper bound for
+            the range.
+    """
+
+    start_value_closed = proto.Field(proto.BYTES, number=1, oneof="start_value")
+
+    start_value_open = proto.Field(proto.BYTES, number=2, oneof="start_value")
+
+    end_value_closed = proto.Field(proto.BYTES, number=3, oneof="end_value")
+
+    end_value_open = proto.Field(proto.BYTES, number=4, oneof="end_value")
+
+
+class RowFilter(proto.Message):
+    r"""Takes a row as input and produces an alternate view of the row based
+    on specified rules. For example, a RowFilter might trim down a row
+    to include just the cells from columns matching a given regular
+    expression, or might return all the cells of a row but not their
+    values. More complicated filters can be composed out of these
+    components to express requests such as, "within every column of a
+    particular family, give just the two most recent cells which are
+    older than timestamp X."
+
+    There are two broad categories of RowFilters (true filters and
+    transformers), as well as two ways to compose simple filters into
+    more complex ones (chains and interleaves). They work as follows:
+
+    -  True filters alter the input row by excluding some of its cells
+       wholesale from the output row. An example of a true filter is the
+       ``value_regex_filter``, which excludes cells whose values don't
+       match the specified pattern. All regex true filters use RE2
+       syntax (https://github.com/google/re2/wiki/Syntax) in raw byte
+       mode (RE2::Latin1), and are evaluated as full matches. An
+       important point to keep in mind is that ``RE2(.)`` is equivalent
+       by default to ``RE2([^\n])``, meaning that it does not match
+       newlines. When attempting to match an arbitrary byte, you should
+       therefore use the escape sequence ``\C``, which may need to be
+       further escaped as ``\\C`` in your client language.
+
+    -  Transformers alter the input row by changing the values of some
+       of its cells in the output, without excluding them completely.
+ Currently, the only supported transformer is the + ``strip_value_transformer``, which replaces every cell's value + with the empty string. + + - Chains and interleaves are described in more detail in the + RowFilter.Chain and RowFilter.Interleave documentation. + + The total serialized size of a RowFilter message must not exceed + 4096 bytes, and RowFilters may not be nested within each other (in + Chains or Interleaves) to a depth of more than 20. + + Attributes: + chain (google.cloud.bigtable_v2.types.RowFilter.Chain): + Applies several RowFilters to the data in + sequence, progressively narrowing the results. + interleave (google.cloud.bigtable_v2.types.RowFilter.Interleave): + Applies several RowFilters to the data in + parallel and combines the results. + condition (google.cloud.bigtable_v2.types.RowFilter.Condition): + Applies one of two possible RowFilters to the + data based on the output of a predicate + RowFilter. + sink (bool): + ADVANCED USE ONLY. Hook for introspection into the + RowFilter. Outputs all cells directly to the output of the + read rather than to any parent filter. Consider the + following example: + + :: + + Chain( + FamilyRegex("A"), + Interleave( + All(), + Chain(Label("foo"), Sink()) + ), + QualifierRegex("B") + ) + + A,A,1,w + A,B,2,x + B,B,4,z + | + FamilyRegex("A") + | + A,A,1,w + A,B,2,x + | + +------------+-------------+ + | | + All() Label(foo) + | | + A,A,1,w A,A,1,w,labels:[foo] + A,B,2,x A,B,2,x,labels:[foo] + | | + | Sink() --------------+ + | | | + +------------+ x------+ A,A,1,w,labels:[foo] + | A,B,2,x,labels:[foo] + A,A,1,w | + A,B,2,x | + | | + QualifierRegex("B") | + | | + A,B,2,x | + | | + +--------------------------------+ + | + A,A,1,w,labels:[foo] + A,B,2,x,labels:[foo] // could be switched + A,B,2,x // could be switched + + Despite being excluded by the qualifier filter, a copy of + every cell that reaches the sink is present in the final + result. + + As with an + [Interleave][google.bigtable.v2.RowFilter.Interleave], + duplicate cells are possible, and appear in an unspecified + mutual order. In this case we have a duplicate with column + "A:B" and timestamp 2, because one copy passed through the + all filter while the other was passed through the label and + sink. Note that one copy has label "foo", while the other + does not. + + Cannot be used within the ``predicate_filter``, + ``true_filter``, or ``false_filter`` of a + [Condition][google.bigtable.v2.RowFilter.Condition]. + pass_all_filter (bool): + Matches all cells, regardless of input. Functionally + equivalent to leaving ``filter`` unset, but included for + completeness. + block_all_filter (bool): + Does not match any cells, regardless of + input. Useful for temporarily disabling just + part of a filter. + row_key_regex_filter (bytes): + Matches only cells from rows whose keys satisfy the given + RE2 regex. In other words, passes through the entire row + when the key matches, and otherwise produces an empty row. + Note that, since row keys can contain arbitrary bytes, the + ``\C`` escape sequence must be used if a true wildcard is + desired. The ``.`` character will not match the new line + character ``\n``, which may be present in a binary key. + row_sample_filter (float): + Matches all cells from a row with probability + p, and matches no cells from the row with + probability 1-p. + family_name_regex_filter (str): + Matches only cells from columns whose families satisfy the + given RE2 regex. 
For technical reasons, the regex must not + contain the ``:`` character, even if it is not being used as + a literal. Note that, since column families cannot contain + the new line character ``\n``, it is sufficient to use ``.`` + as a full wildcard when matching column family names. + column_qualifier_regex_filter (bytes): + Matches only cells from columns whose qualifiers satisfy the + given RE2 regex. Note that, since column qualifiers can + contain arbitrary bytes, the ``\C`` escape sequence must be + used if a true wildcard is desired. The ``.`` character will + not match the new line character ``\n``, which may be + present in a binary qualifier. + column_range_filter (google.cloud.bigtable_v2.types.ColumnRange): + Matches only cells from columns within the + given range. + timestamp_range_filter (google.cloud.bigtable_v2.types.TimestampRange): + Matches only cells with timestamps within the + given range. + value_regex_filter (bytes): + Matches only cells with values that satisfy the given + regular expression. Note that, since cell values can contain + arbitrary bytes, the ``\C`` escape sequence must be used if + a true wildcard is desired. The ``.`` character will not + match the new line character ``\n``, which may be present in + a binary value. + value_range_filter (google.cloud.bigtable_v2.types.ValueRange): + Matches only cells with values that fall + within the given range. + cells_per_row_offset_filter (int): + Skips the first N cells of each row, matching + all subsequent cells. If duplicate cells are + present, as is possible when using an + Interleave, each copy of the cell is counted + separately. + cells_per_row_limit_filter (int): + Matches only the first N cells of each row. + If duplicate cells are present, as is possible + when using an Interleave, each copy of the cell + is counted separately. + cells_per_column_limit_filter (int): + Matches only the most recent N cells within each column. For + example, if N=2, this filter would match column ``foo:bar`` + at timestamps 10 and 9, skip all earlier cells in + ``foo:bar``, and then begin matching again in column + ``foo:bar2``. If duplicate cells are present, as is possible + when using an Interleave, each copy of the cell is counted + separately. + strip_value_transformer (bool): + Replaces each cell's value with the empty + string. + apply_label_transformer (str): + Applies the given label to all cells in the output row. This + allows the client to determine which results were produced + from which part of the filter. + + Values must be at most 15 characters in length, and match + the RE2 pattern ``[a-z0-9\\-]+`` + + Due to a technical limitation, it is not currently possible + to apply multiple labels to a cell. As a result, a Chain may + have no more than one sub-filter which contains a + ``apply_label_transformer``. It is okay for an Interleave to + contain multiple ``apply_label_transformers``, as they will + be applied to separate copies of the input. This may be + relaxed in the future. + """ + + class Chain(proto.Message): + r"""A RowFilter which sends rows through several RowFilters in + sequence. + + Attributes: + filters (Sequence[google.cloud.bigtable_v2.types.RowFilter]): + The elements of "filters" are chained + together to process the input row: in row -> + f(0) -> intermediate row -> f(1) -> ... -> f(N) + -> out row The full chain is executed + atomically. 
+ """ + + filters = proto.RepeatedField(proto.MESSAGE, number=1, message="RowFilter",) + + class Interleave(proto.Message): + r"""A RowFilter which sends each row to each of several component + RowFilters and interleaves the results. + + Attributes: + filters (Sequence[google.cloud.bigtable_v2.types.RowFilter]): + The elements of "filters" all process a copy of the input + row, and the results are pooled, sorted, and combined into a + single output row. If multiple cells are produced with the + same column and timestamp, they will all appear in the + output row in an unspecified mutual order. Consider the + following example, with three filters: + + :: + + input row + | + ----------------------------------------------------- + | | | + f(0) f(1) f(2) + | | | + 1: foo,bar,10,x foo,bar,10,z far,bar,7,a + 2: foo,blah,11,z far,blah,5,x far,blah,5,x + | | | + ----------------------------------------------------- + | + 1: foo,bar,10,z // could have switched with #2 + 2: foo,bar,10,x // could have switched with #1 + 3: foo,blah,11,z + 4: far,bar,7,a + 5: far,blah,5,x // identical to #6 + 6: far,blah,5,x // identical to #5 + + All interleaved filters are executed atomically. + """ + + filters = proto.RepeatedField(proto.MESSAGE, number=1, message="RowFilter",) + + class Condition(proto.Message): + r"""A RowFilter which evaluates one of two possible RowFilters, + depending on whether or not a predicate RowFilter outputs any + cells from the input row. + IMPORTANT NOTE: The predicate filter does not execute atomically + with the true and false filters, which may lead to inconsistent + or unexpected results. Additionally, Condition filters have poor + performance, especially when filters are set for the false + condition. + + Attributes: + predicate_filter (google.cloud.bigtable_v2.types.RowFilter): + If ``predicate_filter`` outputs any cells, then + ``true_filter`` will be evaluated on the input row. + Otherwise, ``false_filter`` will be evaluated. + true_filter (google.cloud.bigtable_v2.types.RowFilter): + The filter to apply to the input row if ``predicate_filter`` + returns any results. If not provided, no results will be + returned in the true case. + false_filter (google.cloud.bigtable_v2.types.RowFilter): + The filter to apply to the input row if ``predicate_filter`` + does not return any results. If not provided, no results + will be returned in the false case. 
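
The Chain, Interleave, and Condition composites documented above combine the leaf filters listed in the RowFilter attributes. A short sketch, assuming the proto-plus constructors defined in this module, of a Chain that narrows to one column family and then keeps only the newest cell per column:

```python
from google.cloud.bigtable_v2.types import RowFilter

# Each inner RowFilter sets exactly one member of the "filter" oneof.
latest_in_cf1 = RowFilter(
    chain=RowFilter.Chain(
        filters=[
            RowFilter(family_name_regex_filter="cf1"),
            RowFilter(cells_per_column_limit_filter=1),
        ]
    )
)
```

Chained filters run in sequence over the row, so the family filter prunes cells before the per-column limit is applied.
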
+ """ + + predicate_filter = proto.Field(proto.MESSAGE, number=1, message="RowFilter",) + + true_filter = proto.Field(proto.MESSAGE, number=2, message="RowFilter",) + + false_filter = proto.Field(proto.MESSAGE, number=3, message="RowFilter",) + + chain = proto.Field(proto.MESSAGE, number=1, oneof="filter", message=Chain,) + + interleave = proto.Field( + proto.MESSAGE, number=2, oneof="filter", message=Interleave, + ) + + condition = proto.Field(proto.MESSAGE, number=3, oneof="filter", message=Condition,) + + sink = proto.Field(proto.BOOL, number=16, oneof="filter") + + pass_all_filter = proto.Field(proto.BOOL, number=17, oneof="filter") + + block_all_filter = proto.Field(proto.BOOL, number=18, oneof="filter") + + row_key_regex_filter = proto.Field(proto.BYTES, number=4, oneof="filter") + + row_sample_filter = proto.Field(proto.DOUBLE, number=14, oneof="filter") + + family_name_regex_filter = proto.Field(proto.STRING, number=5, oneof="filter") + + column_qualifier_regex_filter = proto.Field(proto.BYTES, number=6, oneof="filter") + + column_range_filter = proto.Field( + proto.MESSAGE, number=7, oneof="filter", message="ColumnRange", + ) + + timestamp_range_filter = proto.Field( + proto.MESSAGE, number=8, oneof="filter", message="TimestampRange", + ) + + value_regex_filter = proto.Field(proto.BYTES, number=9, oneof="filter") + + value_range_filter = proto.Field( + proto.MESSAGE, number=15, oneof="filter", message="ValueRange", + ) + + cells_per_row_offset_filter = proto.Field(proto.INT32, number=10, oneof="filter") + + cells_per_row_limit_filter = proto.Field(proto.INT32, number=11, oneof="filter") + + cells_per_column_limit_filter = proto.Field(proto.INT32, number=12, oneof="filter") + + strip_value_transformer = proto.Field(proto.BOOL, number=13, oneof="filter") + + apply_label_transformer = proto.Field(proto.STRING, number=19, oneof="filter") + + +class Mutation(proto.Message): + r"""Specifies a particular change to be made to the contents of a + row. + + Attributes: + set_cell (google.cloud.bigtable_v2.types.Mutation.SetCell): + Set a cell's value. + delete_from_column (google.cloud.bigtable_v2.types.Mutation.DeleteFromColumn): + Deletes cells from a column. + delete_from_family (google.cloud.bigtable_v2.types.Mutation.DeleteFromFamily): + Deletes cells from a column family. + delete_from_row (google.cloud.bigtable_v2.types.Mutation.DeleteFromRow): + Deletes cells from the entire row. + """ + + class SetCell(proto.Message): + r"""A Mutation which sets the value of the specified cell. + + Attributes: + family_name (str): + The name of the family into which new data should be + written. Must match ``[-_.a-zA-Z0-9]+`` + column_qualifier (bytes): + The qualifier of the column into which new + data should be written. Can be any byte string, + including the empty string. + timestamp_micros (int): + The timestamp of the cell into which new data + should be written. Use -1 for current Bigtable + server time. Otherwise, the client should set + this value itself, noting that the default value + is a timestamp of zero if the field is left + unspecified. Values must match the granularity + of the table (e.g. micros, millis). + value (bytes): + The value to be written into the specified + cell. 
+ """ + + family_name = proto.Field(proto.STRING, number=1) + + column_qualifier = proto.Field(proto.BYTES, number=2) + + timestamp_micros = proto.Field(proto.INT64, number=3) + + value = proto.Field(proto.BYTES, number=4) + + class DeleteFromColumn(proto.Message): + r"""A Mutation which deletes cells from the specified column, + optionally restricting the deletions to a given timestamp range. + + Attributes: + family_name (str): + The name of the family from which cells should be deleted. + Must match ``[-_.a-zA-Z0-9]+`` + column_qualifier (bytes): + The qualifier of the column from which cells + should be deleted. Can be any byte string, + including the empty string. + time_range (google.cloud.bigtable_v2.types.TimestampRange): + The range of timestamps within which cells + should be deleted. + """ + + family_name = proto.Field(proto.STRING, number=1) + + column_qualifier = proto.Field(proto.BYTES, number=2) + + time_range = proto.Field(proto.MESSAGE, number=3, message="TimestampRange",) + + class DeleteFromFamily(proto.Message): + r"""A Mutation which deletes all cells from the specified column + family. + + Attributes: + family_name (str): + The name of the family from which cells should be deleted. + Must match ``[-_.a-zA-Z0-9]+`` + """ + + family_name = proto.Field(proto.STRING, number=1) + + class DeleteFromRow(proto.Message): + r"""A Mutation which deletes all cells from the containing row.""" + + set_cell = proto.Field(proto.MESSAGE, number=1, oneof="mutation", message=SetCell,) + + delete_from_column = proto.Field( + proto.MESSAGE, number=2, oneof="mutation", message=DeleteFromColumn, + ) + + delete_from_family = proto.Field( + proto.MESSAGE, number=3, oneof="mutation", message=DeleteFromFamily, + ) + + delete_from_row = proto.Field( + proto.MESSAGE, number=4, oneof="mutation", message=DeleteFromRow, + ) + + +class ReadModifyWriteRule(proto.Message): + r"""Specifies an atomic read/modify/write operation on the latest + value of the specified column. + + Attributes: + family_name (str): + The name of the family to which the read/modify/write should + be applied. Must match ``[-_.a-zA-Z0-9]+`` + column_qualifier (bytes): + The qualifier of the column to which the + read/modify/write should be applied. + Can be any byte string, including the empty + string. + append_value (bytes): + Rule specifying that ``append_value`` be appended to the + existing value. If the targeted cell is unset, it will be + treated as containing the empty string. + increment_amount (int): + Rule specifying that ``increment_amount`` be added to the + existing value. If the targeted cell is unset, it will be + treated as containing a zero. Otherwise, the targeted cell + must contain an 8-byte value (interpreted as a 64-bit + big-endian signed integer), or the entire request will fail. + """ + + family_name = proto.Field(proto.STRING, number=1) + + column_qualifier = proto.Field(proto.BYTES, number=2) + + append_value = proto.Field(proto.BYTES, number=3, oneof="rule") + + increment_amount = proto.Field(proto.INT64, number=4, oneof="rule") + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/noxfile.py b/noxfile.py index 7947441c6..70d9c13c2 100644 --- a/noxfile.py +++ b/noxfile.py @@ -14,6 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +# Generated by synthtool. DO NOT EDIT! 
+ from __future__ import absolute_import import os import shutil @@ -21,10 +23,24 @@ import nox +BLACK_VERSION = "black==19.10b0" +BLACK_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"] + DEFAULT_PYTHON_VERSION = "3.8" -SYSTEM_TEST_PYTHON_VERSIONS = ["2.7", "3.8"] -UNIT_TEST_PYTHON_VERSIONS = ["2.7", "3.5", "3.6", "3.7", "3.8"] -LOCAL_DEPS = () +SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"] +UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9"] + +# 'docfx' is excluded since it only needs to run in 'docs-presubmit' +nox.options.sessions = [ + "unit", + "system", + "cover", + "lint", + "lint_setup_py", + "blacken", + "docs", +] + @nox.session(python=DEFAULT_PYTHON_VERSION) def lint(session): @@ -33,13 +49,9 @@ def lint(session): Returns a failure if the linters find linting errors or sufficiently serious code quality issues. """ - session.install("flake8", "black", *LOCAL_DEPS) + session.install("flake8", BLACK_VERSION) session.run( - "black", - "--check", - "google", - "tests", - "docs", + "black", "--check", *BLACK_PATHS, ) session.run("flake8", "google", "tests") @@ -49,13 +61,14 @@ def blacken(session): """Run black. Format code to uniform standard. + + This currently uses Python 3.6 due to the automated Kokoro run of synthtool. + That run uses an image that doesn't have 3.6 installed. Before updating this + check the state of the `gcp_ubuntu_config` we use for that Kokoro run. """ - session.install("black") + session.install(BLACK_VERSION) session.run( - "black", - "google", - "tests", - "docs", + "black", *BLACK_PATHS, ) @@ -68,17 +81,20 @@ def lint_setup_py(session): def default(session): # Install all test dependencies, then install this package in-place. - session.install("mock", "pytest", "pytest-cov") - for local_dep in LOCAL_DEPS: - session.install("-e", local_dep) + session.install("asyncmock", "pytest-asyncio") + + session.install( + "mock", "pytest", "pytest-cov", + ) + session.install("-e", ".") # Run py.test against the unit tests. session.run( "py.test", "--quiet", - "--cov=google.cloud", - "--cov=tests.unit", + "--cov=google/cloud", + "--cov=tests/unit", "--cov-append", "--cov-config=.coveragerc", "--cov-report=", @@ -94,24 +110,15 @@ def unit(session): default(session) -@nox.session(python=DEFAULT_PYTHON_VERSION) -def cover(session): - """Run the final coverage report. - - This outputs the coverage report aggregating coverage from the unit - test runs (not system test runs), and then erases coverage data. - """ - session.install("coverage", "pytest-cov") - session.run("coverage", "report", "--show-missing", "--fail-under=99") - - session.run("coverage", "erase") - - @nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS) def system(session): """Run the system test suite.""" system_test_path = os.path.join("tests", "system.py") system_test_folder_path = os.path.join("tests", "system") + + # Check the value of `RUN_SYSTEM_TESTS` env var. It defaults to true. + if os.environ.get("RUN_SYSTEM_TESTS", "true") == "false": + session.skip("RUN_SYSTEM_TESTS is set to false, skipping") # Sanity check: Only run tests if the environment variable is set. if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""): session.skip("Credentials must be set via environment variable") @@ -127,10 +134,9 @@ def system(session): # Install all test dependencies, then install this package into the # virtualenv's dist-packages. 
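An aside on the RUN_SYSTEM_TESTS gate added above: the variable defaults to "true", so only an explicit "false" skips the session. The same pattern can be reused in any nox session; a minimal sketch (session name hypothetical):

    import os
    import nox

    @nox.session(python="3.8")
    def my_gated_session(session):
        # Defaults to "true"; only an explicit "false" opts out.
        if os.environ.get("RUN_SYSTEM_TESTS", "true") == "false":
            session.skip("RUN_SYSTEM_TESTS is set to false, skipping")
        session.install("pytest")
        session.run("pytest", "--quiet")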
- session.install("mock", "pytest") - for local_dep in LOCAL_DEPS: - session.install("-e", local_dep) - session.install("-e", "test_utils/") + session.install( + "mock", "pytest", "google-cloud-testutils", + ) session.install("-e", ".") # Run py.test against the system tests. @@ -140,32 +146,17 @@ def system(session): session.run("py.test", "--quiet", system_test_folder_path, *session.posargs) -@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS) -def snippets(session): - """Run the documentation example snippets.""" - # Sanity check: Only run snippets system tests if the environment variable - # is set. - if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""): - session.skip("Credentials must be set via environment variable.") +@nox.session(python=DEFAULT_PYTHON_VERSION) +def cover(session): + """Run the final coverage report. - # Install all test dependencies, then install local packages in place. - session.install("mock", "pytest") - for local_dep in LOCAL_DEPS: - session.install("-e", local_dep) - session.install("-e", "test_utils/") - session.install("-e", ".") - session.run( - "py.test", - "--quiet", - os.path.join("docs", "snippets.py"), - *session.posargs - ) - session.run( - "py.test", - "--quiet", - os.path.join("docs", "snippets_table.py"), - *session.posargs - ) + This outputs the coverage report aggregating coverage from the unit + test runs (not system test runs), and then erases coverage data. + """ + session.install("coverage", "pytest-cov") + session.run("coverage", "report", "--show-missing", "--fail-under=99") + + session.run("coverage", "erase") @nox.session(python=DEFAULT_PYTHON_VERSION) @@ -189,12 +180,15 @@ def docs(session): os.path.join("docs", "_build", "html", ""), ) + @nox.session(python=DEFAULT_PYTHON_VERSION) def docfx(session): """Build the docfx yaml files for this library.""" session.install("-e", ".") - session.install("sphinx", "alabaster", "recommonmark", "sphinx-docfx-yaml") + # sphinx-docfx-yaml supports up to sphinx version 1.5.5. + # https://github.com/docascode/sphinx-docfx-yaml/issues/97 + session.install("sphinx==1.5.5", "alabaster", "recommonmark", "sphinx-docfx-yaml") shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) session.run( diff --git a/samples/beam/noxfile.py b/samples/beam/noxfile.py index ba55d7ce5..171bee657 100644 --- a/samples/beam/noxfile.py +++ b/samples/beam/noxfile.py @@ -87,7 +87,8 @@ def get_pytest_env_vars(): TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) -INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False)) +# todo(kolea2): temporary workaround to install pinned dep version +INSTALL_LIBRARY_FROM_SOURCE = False # # Style Checks # diff --git a/samples/beam/requirements.txt b/samples/beam/requirements.txt index cb0825c6f..69b59d1e2 100644 --- a/samples/beam/requirements.txt +++ b/samples/beam/requirements.txt @@ -1,3 +1,3 @@ apache-beam==2.27.0 -google-cloud-bigtable==1.6.1 +google-cloud-bigtable<2.0.0dev1 google-cloud-core==1.6.0 \ No newline at end of file diff --git a/scripts/fixup_bigtable_admin_v2_keywords.py b/scripts/fixup_bigtable_admin_v2_keywords.py new file mode 100644 index 000000000..d30de39db --- /dev/null +++ b/scripts/fixup_bigtable_admin_v2_keywords.py @@ -0,0 +1,216 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class bigtable_adminCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + 'check_consistency': ('name', 'consistency_token', ), + 'create_app_profile': ('parent', 'app_profile_id', 'app_profile', 'ignore_warnings', ), + 'create_backup': ('parent', 'backup_id', 'backup', ), + 'create_cluster': ('parent', 'cluster_id', 'cluster', ), + 'create_instance': ('parent', 'instance_id', 'instance', 'clusters', ), + 'create_table': ('parent', 'table_id', 'table', 'initial_splits', ), + 'create_table_from_snapshot': ('parent', 'table_id', 'source_snapshot', ), + 'delete_app_profile': ('name', 'ignore_warnings', ), + 'delete_backup': ('name', ), + 'delete_cluster': ('name', ), + 'delete_instance': ('name', ), + 'delete_snapshot': ('name', ), + 'delete_table': ('name', ), + 'drop_row_range': ('name', 'row_key_prefix', 'delete_all_data_from_table', ), + 'generate_consistency_token': ('name', ), + 'get_app_profile': ('name', ), + 'get_backup': ('name', ), + 'get_cluster': ('name', ), + 'get_iam_policy': ('resource', 'options', ), + 'get_instance': ('name', ), + 'get_snapshot': ('name', ), + 'get_table': ('name', 'view', ), + 'list_app_profiles': ('parent', 'page_size', 'page_token', ), + 'list_backups': ('parent', 'filter', 'order_by', 'page_size', 'page_token', ), + 'list_clusters': ('parent', 'page_token', ), + 'list_instances': ('parent', 'page_token', ), + 'list_snapshots': ('parent', 'page_size', 'page_token', ), + 'list_tables': ('parent', 'view', 'page_size', 'page_token', ), + 'modify_column_families': ('name', 'modifications', ), + 'partial_update_instance': ('instance', 'update_mask', ), + 'restore_table': ('parent', 'table_id', 'backup', ), + 'set_iam_policy': ('resource', 'policy', ), + 'snapshot_table': ('name', 'cluster', 'snapshot_id', 'ttl', 'description', ), + 'test_iam_permissions': ('resource', 'permissions', ), + 'update_app_profile': ('app_profile', 'update_mask', 'ignore_warnings', ), + 'update_backup': ('backup', 'update_mask', ), + 'update_cluster': ('serve_nodes', 'name', 'location', 'state', 'default_storage_type', ), + 'update_instance': ('display_name', 'name', 'state', 'type_', 'labels', ), + + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. 
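To make the rewrite concrete before the implementation continues: leave_Call folds positional arguments into a single request dict while leaving the control parameters (retry, timeout, metadata) as keywords. A self-contained sketch with a stand-in client (resource name hypothetical):

    class FakeClient:
        def get_instance(self, request=None, timeout=None):
            return request

    client = FakeClient()

    # Before the fixup, code would read:
    #     client.get_instance("projects/p/instances/i", timeout=30.0)
    # After the fixup, the same call becomes:
    result = client.get_instance(
        request={"name": "projects/p/instances/i"}, timeout=30.0
    )
    assert result == {"name": "projects/p/instances/i"}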
+ args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. + return updated + + kwargs, ctrl_kwargs = partition( + lambda a: not a.keyword.value in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), + cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. + for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=bigtable_adminCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the bigtable_admin client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. 
+""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/scripts/fixup_bigtable_v2_keywords.py b/scripts/fixup_bigtable_v2_keywords.py new file mode 100644 index 000000000..e1ff816ee --- /dev/null +++ b/scripts/fixup_bigtable_v2_keywords.py @@ -0,0 +1,184 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class bigtableCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + 'check_and_mutate_row': ('table_name', 'row_key', 'app_profile_id', 'predicate_filter', 'true_mutations', 'false_mutations', ), + 'mutate_row': ('table_name', 'row_key', 'mutations', 'app_profile_id', ), + 'mutate_rows': ('table_name', 'entries', 'app_profile_id', ), + 'read_modify_write_row': ('table_name', 'row_key', 'rules', 'app_profile_id', ), + 'read_rows': ('table_name', 'app_profile_id', 'rows', 'filter', 'rows_limit', ), + 'sample_row_keys': ('table_name', 'app_profile_id', ), + + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. 
+ return updated + + kwargs, ctrl_kwargs = partition( + lambda a: not a.keyword.value in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), + cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. + for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=bigtableCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the bigtable client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. +""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/setup.py b/setup.py index bfb6240f5..c1fa1311c 100644 --- a/setup.py +++ b/setup.py @@ -20,41 +20,44 @@ # Package metadata. 
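Both fixup scripts above share the same CLI; per their argparse definitions, the output directory must already exist and be empty. A hypothetical invocation (paths are placeholders):

    python scripts/fixup_bigtable_admin_v2_keywords.py \
        --input-directory ./my_code --output-directory ./my_code_fixed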
-name = 'google-cloud-bigtable' -description = 'Google Cloud Bigtable API client library' +name = "google-cloud-bigtable" +description = "Google Cloud Bigtable API client library" version = "1.7.0" # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' # 'Development Status :: 5 - Production/Stable' -release_status = 'Development Status :: 5 - Production/Stable' +release_status = "Development Status :: 5 - Production/Stable" dependencies = [ - "google-api-core[grpc] >= 1.14.0, < 2.0.0dev", + "google-api-core[grpc] >= 1.22.2, < 2.0.0dev", "google-cloud-core >= 1.4.1, < 2.0dev", "grpc-google-iam-v1 >= 0.12.3, < 0.13dev", + "proto-plus >= 1.13.0", + "libcst >= 0.2.5", ] -extras = { -} +extras = {} # Setup boilerplate below this line. package_root = os.path.abspath(os.path.dirname(__file__)) -readme_filename = os.path.join(package_root, 'README.rst') -with io.open(readme_filename, encoding='utf-8') as readme_file: +readme_filename = os.path.join(package_root, "README.rst") +with io.open(readme_filename, encoding="utf-8") as readme_file: readme = readme_file.read() # Only include packages under the 'google' namespace. Do not include tests, # benchmarks, etc. packages = [ - package for package in setuptools.find_packages() - if package.startswith('google')] + package + for package in setuptools.PEP420PackageFinder.find() + if package.startswith("google") +] # Determine which namespaces are needed. -namespaces = ['google'] -if 'google.cloud' in packages: - namespaces.append('google.cloud') +namespaces = ["google"] +if "google.cloud" in packages: + namespaces.append("google.cloud") setuptools.setup( @@ -62,30 +65,30 @@ version=version, description=description, long_description=readme, - author='Google LLC', - author_email='googleapis-packages@google.com', - license='Apache 2.0', - url='https://github.com/googleapis/python-bigtable', + author="Google LLC", + author_email="googleapis-packages@google.com", + license="Apache 2.0", + url="https://github.com/googleapis/python-bigtable", classifiers=[ release_status, - 'Intended Audience :: Developers', - 'License :: OSI Approved :: Apache Software License', - 'Programming Language :: Python', - 'Programming Language :: Python :: 2', - 'Programming Language :: Python :: 2.7', - 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.5', - 'Programming Language :: Python :: 3.6', - 'Programming Language :: Python :: 3.7', - 'Operating System :: OS Independent', - 'Topic :: Internet', + "Intended Audience :: Developers", + "License :: OSI Approved :: Apache Software License", + "Programming Language :: Python", + "Programming Language :: Python :: 3.6", + "Programming Language :: Python :: 3.7", + "Operating System :: OS Independent", + "Topic :: Internet", ], - platforms='Posix; MacOS X; Windows', + platforms="Posix; MacOS X; Windows", packages=packages, namespace_packages=namespaces, install_requires=dependencies, extras_require=extras, - python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*', + scripts=[ + "scripts/fixup_bigtable_v2_keywords.py", + "scripts/fixup_bigtable_admin_v2_keywords.py", + ], + python_requires=">=3.6", include_package_data=True, zip_safe=False, ) diff --git a/synth.py b/synth.py index 07c293339..e2fda520a 100644 --- a/synth.py +++ b/synth.py @@ -33,6 +33,7 @@ s.move(library / "google/cloud/bigtable_v2") s.move(library / "tests") +s.move(library / "scripts") # Generate admin client library = gapic.py_library( @@ -44,58 +45,16 @@ s.move(library / 
"google/cloud/bigtable_admin_v2") s.move(library / "tests") - -# ---------------------------------------------------------------------------- -# Work around non-standard installations (missing setuptools). -# -# These replacements can be removed after migrating to the microgenerator, -# which will generate them directly. -# ---------------------------------------------------------------------------- - -admin_clients = [ - "google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py", - "google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py", -] - -s.replace( - admin_clients, - """\ -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution\( - 'google-cloud-bigtable-admin', -\).version -""", - """\ -try: - _GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( - "google-cloud-bigtable" - ).version -except pkg_resources.DistributionNotFound: # pragma: NO COVER - _GAPIC_LIBRARY_VERSION = None -""" -) - -s.replace( - "google/cloud/bigtable_v2/gapic/bigtable_client.py", - """\ -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution\( - 'google-cloud-bigtable', -\).version -""", - """\ -try: - _GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( - "google-cloud-bigtable" - ).version -except pkg_resources.DistributionNotFound: # pragma: NO COVER - _GAPIC_LIBRARY_VERSION = None -""" -) +s.move(library / "scripts") # ---------------------------------------------------------------------------- # Add templated files # ---------------------------------------------------------------------------- -templated_files = common.py_library(unit_cov_level=97, cov_level=99, samples=True) -s.move(templated_files, excludes=['noxfile.py']) +templated_files = common.py_library( + samples=True, # set to True only if there are samples + microgenerator=True, +) +s.move(templated_files, excludes=[".coveragerc"]) # ---------------------------------------------------------------------------- # Samples templates diff --git a/tests/system.py b/tests/system.py index daf644ea2..84f9977e1 100644 --- a/tests/system.py +++ b/tests/system.py @@ -18,6 +18,7 @@ import time import unittest +from google.api_core.datetime_helpers import DatetimeWithNanoseconds from google.api_core.exceptions import DeadlineExceeded from google.api_core.exceptions import TooManyRequests from google.cloud.environment_vars import BIGTABLE_EMULATOR @@ -41,9 +42,10 @@ from google.cloud.bigtable.row_data import PartialRowData from google.cloud.bigtable.row_set import RowSet from google.cloud.bigtable.row_set import RowRange -from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_table_admin_client_config as table_admin_config, -) + +# from google.cloud.bigtable_admin_v2.gapic import ( +# bigtable_table_admin_client_config as table_admin_config, +# ) UNIQUE_SUFFIX = unique_resource_id("-") LOCATION_ID = "us-central1-c" @@ -104,11 +106,11 @@ def setUpModule(): from google.cloud.bigtable.enums import Instance # See: https://github.com/googleapis/google-cloud-python/issues/5928 - interfaces = table_admin_config.config["interfaces"] - iface_config = interfaces["google.bigtable.admin.v2.BigtableTableAdmin"] - methods = iface_config["methods"] - create_table = methods["CreateTable"] - create_table["timeout_millis"] = 90000 + # interfaces = table_admin_config.config["interfaces"] + # iface_config = interfaces["google.bigtable.admin.v2.BigtableTableAdmin"] + # methods = iface_config["methods"] + # create_table = methods["CreateTable"] + # create_table["timeout_millis"] = 90000 Config.IN_EMULATOR = os.getenv(BIGTABLE_EMULATOR) is 
not None @@ -838,6 +840,8 @@ def test_delete_column_family(self): self.assertEqual(temp_table.list_column_families(), {}) def test_backup(self): + from google.cloud._helpers import _datetime_to_pb_timestamp + temp_table_id = "test-backup-table" temp_table = Config.INSTANCE_DATA.table(temp_table_id) temp_table.create() @@ -876,11 +880,16 @@ def test_backup(self): # Testing `Backup.update_expire_time()` method expire += 3600 # A one-hour change in the `expire_time` parameter - temp_backup.update_expire_time(datetime.datetime.utcfromtimestamp(expire)) + updated_time = datetime.datetime.utcfromtimestamp(expire) + temp_backup.update_expire_time(updated_time) + test = _datetime_to_pb_timestamp(updated_time) # Testing `Backup.get()` method temp_table_backup = temp_backup.get() - self.assertEqual(expire, temp_table_backup.expire_time.seconds) + self.assertEqual( + test.seconds, + DatetimeWithNanoseconds.timestamp(temp_table_backup.expire_time), + ) # Testing `Table.restore()` and `Backup.retore()` methods restored_table_id = "test-backup-table-restored" diff --git a/tests/unit/gapic/bigtable_admin_v2/__init__.py b/tests/unit/gapic/bigtable_admin_v2/__init__.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/tests/unit/gapic/bigtable_admin_v2/__init__.py @@ -0,0 +1 @@ + diff --git a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py new file mode 100644 index 000000000..5c6752cac --- /dev/null +++ b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -0,0 +1,5316 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
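A note on the test_backup change above: with the microgenerated surface, expire_time comes back as a DatetimeWithNanoseconds rather than a protobuf Timestamp, hence the epoch-seconds comparison. A standalone sketch of that equivalence (instant chosen arbitrarily):

    import datetime

    from google.api_core.datetime_helpers import DatetimeWithNanoseconds
    from google.cloud._helpers import _datetime_to_pb_timestamp

    dt = DatetimeWithNanoseconds(2021, 2, 11, tzinfo=datetime.timezone.utc)
    pb_ts = _datetime_to_pb_timestamp(dt)  # protobuf Timestamp
    assert pb_ts.seconds == int(dt.timestamp())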
+# + +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from google import auth +from google.api_core import client_options +from google.api_core import exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.auth import credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminAsyncClient, +) +from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, +) +from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import pagers +from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import transports +from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin +from google.cloud.bigtable_admin_v2.types import common +from google.cloud.bigtable_admin_v2.types import instance +from google.cloud.bigtable_admin_v2.types import instance as gba_instance +from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore +from google.iam.v1 import options_pb2 as options # type: ignore +from google.iam.v1 import policy_pb2 as policy # type: ignore +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.type import expr_pb2 as expr # type: ignore + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert BigtableInstanceAdminClient._get_default_mtls_endpoint(None) is None + assert ( + BigtableInstanceAdminClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + BigtableInstanceAdminClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + BigtableInstanceAdminClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + BigtableInstanceAdminClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + BigtableInstanceAdminClient._get_default_mtls_endpoint(non_googleapi) + == non_googleapi + ) + + +def test_bigtable_instance_admin_client_from_service_account_info(): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = BigtableInstanceAdminClient.from_service_account_info(info) + assert client.transport._credentials == creds + + assert client.transport._host == "bigtableadmin.googleapis.com:443" + + +@pytest.mark.parametrize( + "client_class", [BigtableInstanceAdminClient, BigtableInstanceAdminAsyncClient,] +) +def test_bigtable_instance_admin_client_from_service_account_file(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + + assert client.transport._host == "bigtableadmin.googleapis.com:443" + + +def test_bigtable_instance_admin_client_get_transport_class(): + transport = BigtableInstanceAdminClient.get_transport_class() + available_transports = [ + transports.BigtableInstanceAdminGrpcTransport, + ] + assert transport in available_transports + + transport = BigtableInstanceAdminClient.get_transport_class("grpc") + assert transport == transports.BigtableInstanceAdminGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + BigtableInstanceAdminClient, + transports.BigtableInstanceAdminGrpcTransport, + "grpc", + ), + ( + BigtableInstanceAdminAsyncClient, + transports.BigtableInstanceAdminGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +@mock.patch.object( + BigtableInstanceAdminClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(BigtableInstanceAdminClient), +) +@mock.patch.object( + BigtableInstanceAdminAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(BigtableInstanceAdminAsyncClient), +) +def test_bigtable_instance_admin_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. 
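The endpoint mapping exercised by test__get_default_mtls_endpoint above boils down to inserting "mtls" after the first label of a googleapis.com host. A re-derivation for illustration, not the library's actual implementation:

    def to_mtls(endpoint):
        # "example.googleapis.com"         -> "example.mtls.googleapis.com"
        # "example.sandbox.googleapis.com" -> "example.mtls.sandbox.googleapis.com"
        # Non-Google and already-mTLS endpoints are returned unchanged.
        if (
            not endpoint
            or not endpoint.endswith("googleapis.com")
            or ".mtls." in endpoint
        ):
            return endpoint
        name, _, rest = endpoint.partition(".")
        return "{}.mtls.{}".format(name, rest)

    assert to_mtls("example.googleapis.com") == "example.mtls.googleapis.com"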
+ with mock.patch.object(BigtableInstanceAdminClient, "get_transport_class") as gtc: + transport = transport_class(credentials=credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(BigtableInstanceAdminClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + ( + BigtableInstanceAdminClient, + transports.BigtableInstanceAdminGrpcTransport, + "grpc", + "true", + ), + ( + BigtableInstanceAdminAsyncClient, + transports.BigtableInstanceAdminGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + ( + BigtableInstanceAdminClient, + transports.BigtableInstanceAdminGrpcTransport, + "grpc", + "false", + ), + ( + BigtableInstanceAdminAsyncClient, + transports.BigtableInstanceAdminGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ], +) +@mock.patch.object( + BigtableInstanceAdminClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(BigtableInstanceAdminClient), +) +@mock.patch.object( + BigtableInstanceAdminAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(BigtableInstanceAdminAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_bigtable_instance_admin_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
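In application code, the certificate plumbing these cases pin down looks roughly like the following; the callback is hypothetical, ADC credentials are assumed, and GOOGLE_API_USE_CLIENT_CERTIFICATE must be "true" for the certificate to take effect:

    from google.api_core.client_options import ClientOptions
    from google.cloud.bigtable_admin_v2 import BigtableInstanceAdminClient

    def my_cert_source():
        # Hypothetical: return (cert_bytes, key_bytes) for the client cert.
        return b"cert bytes", b"key bytes"

    options = ClientOptions(client_cert_source=my_cert_source)
    client = BigtableInstanceAdminClient(client_options=options)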
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + BigtableInstanceAdminClient, + transports.BigtableInstanceAdminGrpcTransport, + "grpc", + ), + ( + BigtableInstanceAdminAsyncClient, + transports.BigtableInstanceAdminGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_bigtable_instance_admin_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. + options = client_options.ClientOptions(scopes=["1", "2"],) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + BigtableInstanceAdminClient, + transports.BigtableInstanceAdminGrpcTransport, + "grpc", + ), + ( + BigtableInstanceAdminAsyncClient, + transports.BigtableInstanceAdminGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_bigtable_instance_admin_client_client_options_credentials_file( + client_class, transport_class, transport_name +): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions(credentials_file="credentials.json") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_bigtable_instance_admin_client_client_options_from_dict(): + with mock.patch( + "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.BigtableInstanceAdminGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = BigtableInstanceAdminClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_create_instance( + transport: str = "grpc", request_type=bigtable_instance_admin.CreateInstanceRequest +): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.create_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.CreateInstanceRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_instance_from_dict(): + test_create_instance(request_type=dict) + + +@pytest.mark.asyncio +async def test_create_instance_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.CreateInstanceRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.create_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.CreateInstanceRequest() + + # Establish that the response is the type that we expect. 
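Outside the mocks, the call under test corresponds to something like the following; all names are placeholders, ADC credentials are assumed, and create_instance returns a long-running operation whose result() blocks until the instance exists:

    from google.cloud.bigtable_admin_v2 import BigtableInstanceAdminClient

    client = BigtableInstanceAdminClient()
    operation = client.create_instance(
        request={
            "parent": "projects/my-project",
            "instance_id": "my-instance",
            "instance": {"display_name": "My Instance"},
            "clusters": {
                "my-cluster": {
                    "location": "projects/my-project/locations/us-central1-c",
                    "serve_nodes": 3,
                }
            },
        }
    )
    instance = operation.result()  # blocks until the LRO completes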
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_instance_async_from_dict(): + await test_create_instance_async(request_type=dict) + + +def test_create_instance_field_headers(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.CreateInstanceRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_instance), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.create_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_instance_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.CreateInstanceRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_instance), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.create_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_instance_flattened(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_instance( + parent="parent_value", + instance_id="instance_id_value", + instance=gba_instance.Instance(name="name_value"), + clusters={"key_value": gba_instance.Cluster(name="name_value")}, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].instance_id == "instance_id_value" + + assert args[0].instance == gba_instance.Instance(name="name_value") + + assert args[0].clusters == { + "key_value": gba_instance.Cluster(name="name_value") + } + + +def test_create_instance_flattened_error(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_instance( + bigtable_instance_admin.CreateInstanceRequest(), + parent="parent_value", + instance_id="instance_id_value", + instance=gba_instance.Instance(name="name_value"), + clusters={"key_value": gba_instance.Cluster(name="name_value")}, + ) + + +@pytest.mark.asyncio +async def test_create_instance_flattened_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_instance( + parent="parent_value", + instance_id="instance_id_value", + instance=gba_instance.Instance(name="name_value"), + clusters={"key_value": gba_instance.Cluster(name="name_value")}, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].instance_id == "instance_id_value" + + assert args[0].instance == gba_instance.Instance(name="name_value") + + assert args[0].clusters == { + "key_value": gba_instance.Cluster(name="name_value") + } + + +@pytest.mark.asyncio +async def test_create_instance_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_instance( + bigtable_instance_admin.CreateInstanceRequest(), + parent="parent_value", + instance_id="instance_id_value", + instance=gba_instance.Instance(name="name_value"), + clusters={"key_value": gba_instance.Cluster(name="name_value")}, + ) + + +def test_get_instance( + transport: str = "grpc", request_type=bigtable_instance_admin.GetInstanceRequest +): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = instance.Instance( + name="name_value", + display_name="display_name_value", + state=instance.Instance.State.READY, + type_=instance.Instance.Type.PRODUCTION, + ) + + response = client.get_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.GetInstanceRequest() + + # Establish that the response is the type that we expect. 
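As the flattened_error tests enforce, a request object and flattened fields are mutually exclusive; each call uses one form or the other. For get_instance, the two equivalent spellings look like this (client as in the sketch above; the name is a placeholder):

    # Flattened form:
    instance = client.get_instance(
        name="projects/my-project/instances/my-instance"
    )

    # Request-object form:
    instance = client.get_instance(
        request={"name": "projects/my-project/instances/my-instance"}
    )

    # Supplying both at once raises ValueError, exactly as tested here.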
+ + assert isinstance(response, instance.Instance) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.state == instance.Instance.State.READY + + assert response.type_ == instance.Instance.Type.PRODUCTION + + +def test_get_instance_from_dict(): + test_get_instance(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_instance_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.GetInstanceRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + instance.Instance( + name="name_value", + display_name="display_name_value", + state=instance.Instance.State.READY, + type_=instance.Instance.Type.PRODUCTION, + ) + ) + + response = await client.get_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.GetInstanceRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, instance.Instance) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.state == instance.Instance.State.READY + + assert response.type_ == instance.Instance.Type.PRODUCTION + + +@pytest.mark.asyncio +async def test_get_instance_async_from_dict(): + await test_get_instance_async(request_type=dict) + + +def test_get_instance_field_headers(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.GetInstanceRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_instance), "__call__") as call: + call.return_value = instance.Instance() + + client.get_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_instance_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.GetInstanceRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
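+ # The async client awaits the transport call, so these tests wrap the canned
+ # response in grpc_helpers_async.FakeUnaryUnaryCall, the google.api_core test
+ # double that mimics a grpc.aio unary-unary call. (In the *_flattened_async
+ # tests the first bare call.return_value assignment is immediately overwritten
+ # by this wrapper and is effectively dead.)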
+ with mock.patch.object(type(client.transport.get_instance), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.Instance()) + + await client.get_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_instance_flattened(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = instance.Instance() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_instance(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_get_instance_flattened_error(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_instance( + bigtable_instance_admin.GetInstanceRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_instance_flattened_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = instance.Instance() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.Instance()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_instance(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_instance_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_instance( + bigtable_instance_admin.GetInstanceRequest(), name="name_value", + ) + + +def test_list_instances( + transport: str = "grpc", request_type=bigtable_instance_admin.ListInstancesRequest +): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_instances), "__call__") as call: + # Designate an appropriate return value for the call. 
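+ # ListInstances is not wrapped in a pager by the generated client; the
+ # response carries the full list plus failed_locations, so the test below
+ # can assert that response.raw_page is the response itself.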
+ call.return_value = bigtable_instance_admin.ListInstancesResponse( + failed_locations=["failed_locations_value"], + next_page_token="next_page_token_value", + ) + + response = client.list_instances(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.ListInstancesRequest() + + # Establish that the response is the type that we expect. + + assert response.raw_page is response + + assert isinstance(response, bigtable_instance_admin.ListInstancesResponse) + + assert response.failed_locations == ["failed_locations_value"] + + assert response.next_page_token == "next_page_token_value" + + +def test_list_instances_from_dict(): + test_list_instances(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_instances_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.ListInstancesRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_instances), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_instance_admin.ListInstancesResponse( + failed_locations=["failed_locations_value"], + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_instances(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.ListInstancesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable_instance_admin.ListInstancesResponse) + + assert response.failed_locations == ["failed_locations_value"] + + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_instances_async_from_dict(): + await test_list_instances_async(request_type=dict) + + +def test_list_instances_field_headers(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.ListInstancesRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_instances), "__call__") as call: + call.return_value = bigtable_instance_admin.ListInstancesResponse() + + client.list_instances(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_instances_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. 
+ # Set these to a non-empty value.
+ request = bigtable_instance_admin.ListInstancesRequest()
+ request.parent = "parent/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_instances), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ bigtable_instance_admin.ListInstancesResponse()
+ )
+
+ await client.list_instances(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
+
+
+def test_list_instances_flattened():
+ client = BigtableInstanceAdminClient(
+ credentials=credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_instances), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = bigtable_instance_admin.ListInstancesResponse()
+
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.list_instances(parent="parent_value",)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0].parent == "parent_value"
+
+
+def test_list_instances_flattened_error():
+ client = BigtableInstanceAdminClient(
+ credentials=credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.list_instances(
+ bigtable_instance_admin.ListInstancesRequest(), parent="parent_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_list_instances_flattened_async():
+ client = BigtableInstanceAdminAsyncClient(
+ credentials=credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_instances), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = bigtable_instance_admin.ListInstancesResponse()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ bigtable_instance_admin.ListInstancesResponse()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.list_instances(parent="parent_value",)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0].parent == "parent_value"
+
+
+@pytest.mark.asyncio
+async def test_list_instances_flattened_error_async():
+ client = BigtableInstanceAdminAsyncClient(
+ credentials=credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError): + await client.list_instances( + bigtable_instance_admin.ListInstancesRequest(), parent="parent_value", + ) + + +def test_update_instance(transport: str = "grpc", request_type=instance.Instance): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = instance.Instance( + name="name_value", + display_name="display_name_value", + state=instance.Instance.State.READY, + type_=instance.Instance.Type.PRODUCTION, + ) + + response = client.update_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == instance.Instance() + + # Establish that the response is the type that we expect. + + assert isinstance(response, instance.Instance) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.state == instance.Instance.State.READY + + assert response.type_ == instance.Instance.Type.PRODUCTION + + +def test_update_instance_from_dict(): + test_update_instance(request_type=dict) + + +@pytest.mark.asyncio +async def test_update_instance_async( + transport: str = "grpc_asyncio", request_type=instance.Instance +): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + instance.Instance( + name="name_value", + display_name="display_name_value", + state=instance.Instance.State.READY, + type_=instance.Instance.Type.PRODUCTION, + ) + ) + + response = await client.update_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == instance.Instance() + + # Establish that the response is the type that we expect. + assert isinstance(response, instance.Instance) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.state == instance.Instance.State.READY + + assert response.type_ == instance.Instance.Type.PRODUCTION + + +@pytest.mark.asyncio +async def test_update_instance_async_from_dict(): + await test_update_instance_async(request_type=dict) + + +def test_update_instance_field_headers(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = instance.Instance() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
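+ # UpdateInstance takes the Instance message itself as the request (there is
+ # no UpdateInstanceRequest wrapper), so the routing header checked below is
+ # derived directly from request.name.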
+ with mock.patch.object(type(client.transport.update_instance), "__call__") as call: + call.return_value = instance.Instance() + + client.update_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_instance_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = instance.Instance() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_instance), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.Instance()) + + await client.update_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_partial_update_instance( + transport: str = "grpc", + request_type=bigtable_instance_admin.PartialUpdateInstanceRequest, +): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_instance), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.partial_update_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.PartialUpdateInstanceRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_partial_update_instance_from_dict(): + test_partial_update_instance(request_type=dict) + + +@pytest.mark.asyncio +async def test_partial_update_instance_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.PartialUpdateInstanceRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_instance), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
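+ # On the wire a long-running method returns a raw operations_pb2.Operation;
+ # the generated client wraps it in an api_core future, which is what the
+ # isinstance(response, future.Future) assertions check. A caller on the sync
+ # client would then block on it, roughly (placeholder names):
+ #   op = client.partial_update_instance(instance=inst, update_mask=mask)
+ #   result = op.result()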
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.partial_update_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.PartialUpdateInstanceRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_partial_update_instance_async_from_dict(): + await test_partial_update_instance_async(request_type=dict) + + +def test_partial_update_instance_field_headers(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.PartialUpdateInstanceRequest() + request.instance.name = "instance.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_instance), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.partial_update_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "instance.name=instance.name/value",) in kw[ + "metadata" + ] + + +@pytest.mark.asyncio +async def test_partial_update_instance_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.PartialUpdateInstanceRequest() + request.instance.name = "instance.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_instance), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.partial_update_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "instance.name=instance.name/value",) in kw[ + "metadata" + ] + + +def test_partial_update_instance_flattened(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_instance), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.partial_update_instance( + instance=gba_instance.Instance(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
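+ # The flattened keywords are packed into a single PartialUpdateInstanceRequest;
+ # update_mask is a protobuf FieldMask whose paths name the Instance fields the
+ # server should overwrite ("paths_value" is just a generated placeholder).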
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].instance == gba_instance.Instance(name="name_value") + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + +def test_partial_update_instance_flattened_error(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.partial_update_instance( + bigtable_instance_admin.PartialUpdateInstanceRequest(), + instance=gba_instance.Instance(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.asyncio +async def test_partial_update_instance_flattened_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_instance), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.partial_update_instance( + instance=gba_instance.Instance(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].instance == gba_instance.Instance(name="name_value") + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + +@pytest.mark.asyncio +async def test_partial_update_instance_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.partial_update_instance( + bigtable_instance_admin.PartialUpdateInstanceRequest(), + instance=gba_instance.Instance(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + +def test_delete_instance( + transport: str = "grpc", request_type=bigtable_instance_admin.DeleteInstanceRequest +): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.DeleteInstanceRequest() + + # Establish that the response is the type that we expect. 
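+ # DeleteInstance returns google.protobuf.Empty on the wire, which the
+ # generated client surfaces as None.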
+ assert response is None + + +def test_delete_instance_from_dict(): + test_delete_instance(request_type=dict) + + +@pytest.mark.asyncio +async def test_delete_instance_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.DeleteInstanceRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + response = await client.delete_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.DeleteInstanceRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_instance_async_from_dict(): + await test_delete_instance_async(request_type=dict) + + +def test_delete_instance_field_headers(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.DeleteInstanceRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: + call.return_value = None + + client.delete_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_instance_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.DeleteInstanceRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + await client.delete_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_instance_flattened(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = None + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_instance(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_delete_instance_flattened_error(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_instance( + bigtable_instance_admin.DeleteInstanceRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_instance_flattened_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_instance(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_delete_instance_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_instance( + bigtable_instance_admin.DeleteInstanceRequest(), name="name_value", + ) + + +def test_create_cluster( + transport: str = "grpc", request_type=bigtable_instance_admin.CreateClusterRequest +): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.create_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.CreateClusterRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +def test_create_cluster_from_dict(): + test_create_cluster(request_type=dict) + + +@pytest.mark.asyncio +async def test_create_cluster_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.CreateClusterRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.create_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.CreateClusterRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_cluster_async_from_dict(): + await test_create_cluster_async(request_type=dict) + + +def test_create_cluster_field_headers(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.CreateClusterRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.create_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_cluster_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.CreateClusterRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.create_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
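+ # The routing header is a metadata entry pairing the field path with its
+ # value, e.g. ("x-goog-request-params", "parent=parent/value"); the tests
+ # recover it from the metadata keyword recorded by the mock.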
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_cluster_flattened(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_cluster( + parent="parent_value", + cluster_id="cluster_id_value", + cluster=instance.Cluster(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].cluster_id == "cluster_id_value" + + assert args[0].cluster == instance.Cluster(name="name_value") + + +def test_create_cluster_flattened_error(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_cluster( + bigtable_instance_admin.CreateClusterRequest(), + parent="parent_value", + cluster_id="cluster_id_value", + cluster=instance.Cluster(name="name_value"), + ) + + +@pytest.mark.asyncio +async def test_create_cluster_flattened_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_cluster( + parent="parent_value", + cluster_id="cluster_id_value", + cluster=instance.Cluster(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].cluster_id == "cluster_id_value" + + assert args[0].cluster == instance.Cluster(name="name_value") + + +@pytest.mark.asyncio +async def test_create_cluster_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_cluster( + bigtable_instance_admin.CreateClusterRequest(), + parent="parent_value", + cluster_id="cluster_id_value", + cluster=instance.Cluster(name="name_value"), + ) + + +def test_get_cluster( + transport: str = "grpc", request_type=bigtable_instance_admin.GetClusterRequest +): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = instance.Cluster( + name="name_value", + location="location_value", + state=instance.Cluster.State.READY, + serve_nodes=1181, + default_storage_type=common.StorageType.SSD, + ) + + response = client.get_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.GetClusterRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, instance.Cluster) + + assert response.name == "name_value" + + assert response.location == "location_value" + + assert response.state == instance.Cluster.State.READY + + assert response.serve_nodes == 1181 + + assert response.default_storage_type == common.StorageType.SSD + + +def test_get_cluster_from_dict(): + test_get_cluster(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_cluster_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.GetClusterRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + instance.Cluster( + name="name_value", + location="location_value", + state=instance.Cluster.State.READY, + serve_nodes=1181, + default_storage_type=common.StorageType.SSD, + ) + ) + + response = await client.get_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.GetClusterRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, instance.Cluster) + + assert response.name == "name_value" + + assert response.location == "location_value" + + assert response.state == instance.Cluster.State.READY + + assert response.serve_nodes == 1181 + + assert response.default_storage_type == common.StorageType.SSD + + +@pytest.mark.asyncio +async def test_get_cluster_async_from_dict(): + await test_get_cluster_async(request_type=dict) + + +def test_get_cluster_field_headers(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.GetClusterRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: + call.return_value = instance.Cluster() + + client.get_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_cluster_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.GetClusterRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.Cluster()) + + await client.get_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_cluster_flattened(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = instance.Cluster() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_cluster(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_get_cluster_flattened_error(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_cluster( + bigtable_instance_admin.GetClusterRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_cluster_flattened_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = instance.Cluster() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.Cluster()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_cluster(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_cluster_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_cluster( + bigtable_instance_admin.GetClusterRequest(), name="name_value", + ) + + +def test_list_clusters( + transport: str = "grpc", request_type=bigtable_instance_admin.ListClustersRequest +): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_instance_admin.ListClustersResponse( + failed_locations=["failed_locations_value"], + next_page_token="next_page_token_value", + ) + + response = client.list_clusters(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.ListClustersRequest() + + # Establish that the response is the type that we expect. + + assert response.raw_page is response + + assert isinstance(response, bigtable_instance_admin.ListClustersResponse) + + assert response.failed_locations == ["failed_locations_value"] + + assert response.next_page_token == "next_page_token_value" + + +def test_list_clusters_from_dict(): + test_list_clusters(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_clusters_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.ListClustersRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_instance_admin.ListClustersResponse( + failed_locations=["failed_locations_value"], + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_clusters(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.ListClustersRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable_instance_admin.ListClustersResponse) + + assert response.failed_locations == ["failed_locations_value"] + + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_clusters_async_from_dict(): + await test_list_clusters_async(request_type=dict) + + +def test_list_clusters_field_headers(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.ListClustersRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: + call.return_value = bigtable_instance_admin.ListClustersResponse() + + client.list_clusters(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_clusters_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.ListClustersRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_instance_admin.ListClustersResponse() + ) + + await client.list_clusters(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_clusters_flattened(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_instance_admin.ListClustersResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_clusters(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +def test_list_clusters_flattened_error(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_clusters( + bigtable_instance_admin.ListClustersRequest(), parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_clusters_flattened_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_instance_admin.ListClustersResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_instance_admin.ListClustersResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_clusters(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +@pytest.mark.asyncio +async def test_list_clusters_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_clusters( + bigtable_instance_admin.ListClustersRequest(), parent="parent_value", + ) + + +def test_update_cluster(transport: str = "grpc", request_type=instance.Cluster): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == instance.Cluster() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_update_cluster_from_dict(): + test_update_cluster(request_type=dict) + + +@pytest.mark.asyncio +async def test_update_cluster_async( + transport: str = "grpc_asyncio", request_type=instance.Cluster +): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
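+ # Like UpdateInstance above, UpdateCluster takes the resource message
+ # (instance.Cluster) as its request type and likewise returns a
+ # long-running operation.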
+ with mock.patch.object(type(client.transport.update_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == instance.Cluster() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_update_cluster_async_from_dict(): + await test_update_cluster_async(request_type=dict) + + +def test_update_cluster_field_headers(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = instance.Cluster() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_cluster), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_cluster_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = instance.Cluster() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_cluster), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_cluster( + transport: str = "grpc", request_type=bigtable_instance_admin.DeleteClusterRequest +): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.DeleteClusterRequest() + + # Establish that the response is the type that we expect. 
+ assert response is None + + +def test_delete_cluster_from_dict(): + test_delete_cluster(request_type=dict) + + +@pytest.mark.asyncio +async def test_delete_cluster_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.DeleteClusterRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + response = await client.delete_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.DeleteClusterRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_cluster_async_from_dict(): + await test_delete_cluster_async(request_type=dict) + + +def test_delete_cluster_field_headers(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.DeleteClusterRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: + call.return_value = None + + client.delete_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_cluster_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.DeleteClusterRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + await client.delete_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_cluster_flattened(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = None + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_cluster(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_delete_cluster_flattened_error(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_cluster( + bigtable_instance_admin.DeleteClusterRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_cluster_flattened_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_cluster(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_delete_cluster_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_cluster( + bigtable_instance_admin.DeleteClusterRequest(), name="name_value", + ) + + +def test_create_app_profile( + transport: str = "grpc", + request_type=bigtable_instance_admin.CreateAppProfileRequest, +): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_app_profile), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = instance.AppProfile( + name="name_value", + etag="etag_value", + description="description_value", + multi_cluster_routing_use_any=None, + ) + + response = client.create_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.CreateAppProfileRequest() + + # Establish that the response is the type that we expect. 
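+    # multi_cluster_routing_use_any was designated as None above; proto-plus
+    # treats None for a message field as simply unset, which is why no
+    # assertion on that oneof follows.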
+ + assert isinstance(response, instance.AppProfile) + + assert response.name == "name_value" + + assert response.etag == "etag_value" + + assert response.description == "description_value" + + +def test_create_app_profile_from_dict(): + test_create_app_profile(request_type=dict) + + +@pytest.mark.asyncio +async def test_create_app_profile_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.CreateAppProfileRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_app_profile), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + instance.AppProfile( + name="name_value", etag="etag_value", description="description_value", + ) + ) + + response = await client.create_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.CreateAppProfileRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, instance.AppProfile) + + assert response.name == "name_value" + + assert response.etag == "etag_value" + + assert response.description == "description_value" + + +@pytest.mark.asyncio +async def test_create_app_profile_async_from_dict(): + await test_create_app_profile_async(request_type=dict) + + +def test_create_app_profile_field_headers(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.CreateAppProfileRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_app_profile), "__call__" + ) as call: + call.return_value = instance.AppProfile() + + client.create_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_app_profile_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.CreateAppProfileRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_app_profile), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.AppProfile()) + + await client.create_app_profile(request) + + # Establish that the underlying gRPC stub method was called. 
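+    # The routing metadata asserted below is built by the client roughly via
+    #
+    #     gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),))
+    #
+    # which yields the ("x-goog-request-params", "parent=parent/value") pair;
+    # the pager test further down calls the same helper directly.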
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_app_profile_flattened(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_app_profile), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = instance.AppProfile() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_app_profile( + parent="parent_value", + app_profile_id="app_profile_id_value", + app_profile=instance.AppProfile(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].app_profile_id == "app_profile_id_value" + + assert args[0].app_profile == instance.AppProfile(name="name_value") + + +def test_create_app_profile_flattened_error(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_app_profile( + bigtable_instance_admin.CreateAppProfileRequest(), + parent="parent_value", + app_profile_id="app_profile_id_value", + app_profile=instance.AppProfile(name="name_value"), + ) + + +@pytest.mark.asyncio +async def test_create_app_profile_flattened_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_app_profile), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = instance.AppProfile() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.AppProfile()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_app_profile( + parent="parent_value", + app_profile_id="app_profile_id_value", + app_profile=instance.AppProfile(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].app_profile_id == "app_profile_id_value" + + assert args[0].app_profile == instance.AppProfile(name="name_value") + + +@pytest.mark.asyncio +async def test_create_app_profile_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
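+    # The guard inside the generated method is, in rough outline (a paraphrase,
+    # not the exact generated source):
+    #
+    #     has_flattened_params = any([parent, app_profile_id, app_profile])
+    #     if request is not None and has_flattened_params:
+    #         raise ValueError(
+    #             "If the `request` argument is set, then none of "
+    #             "the individual field arguments should be set."
+    #         )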
+ with pytest.raises(ValueError): + await client.create_app_profile( + bigtable_instance_admin.CreateAppProfileRequest(), + parent="parent_value", + app_profile_id="app_profile_id_value", + app_profile=instance.AppProfile(name="name_value"), + ) + + +def test_get_app_profile( + transport: str = "grpc", request_type=bigtable_instance_admin.GetAppProfileRequest +): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = instance.AppProfile( + name="name_value", + etag="etag_value", + description="description_value", + multi_cluster_routing_use_any=None, + ) + + response = client.get_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.GetAppProfileRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, instance.AppProfile) + + assert response.name == "name_value" + + assert response.etag == "etag_value" + + assert response.description == "description_value" + + +def test_get_app_profile_from_dict(): + test_get_app_profile(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_app_profile_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.GetAppProfileRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + instance.AppProfile( + name="name_value", etag="etag_value", description="description_value", + ) + ) + + response = await client.get_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.GetAppProfileRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, instance.AppProfile) + + assert response.name == "name_value" + + assert response.etag == "etag_value" + + assert response.description == "description_value" + + +@pytest.mark.asyncio +async def test_get_app_profile_async_from_dict(): + await test_get_app_profile_async(request_type=dict) + + +def test_get_app_profile_field_headers(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.GetAppProfileRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call: + call.return_value = instance.AppProfile() + + client.get_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_app_profile_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.GetAppProfileRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.AppProfile()) + + await client.get_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_app_profile_flattened(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = instance.AppProfile() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_app_profile(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_get_app_profile_flattened_error(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_app_profile( + bigtable_instance_admin.GetAppProfileRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_app_profile_flattened_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = instance.AppProfile() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.AppProfile()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_app_profile(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_app_profile_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_app_profile( + bigtable_instance_admin.GetAppProfileRequest(), name="name_value", + ) + + +def test_list_app_profiles( + transport: str = "grpc", request_type=bigtable_instance_admin.ListAppProfilesRequest +): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_app_profiles), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_instance_admin.ListAppProfilesResponse( + next_page_token="next_page_token_value", + failed_locations=["failed_locations_value"], + ) + + response = client.list_app_profiles(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.ListAppProfilesRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, pagers.ListAppProfilesPager) + + assert response.next_page_token == "next_page_token_value" + + assert response.failed_locations == ["failed_locations_value"] + + +def test_list_app_profiles_from_dict(): + test_list_app_profiles(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_app_profiles_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.ListAppProfilesRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_app_profiles), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_instance_admin.ListAppProfilesResponse( + next_page_token="next_page_token_value", + failed_locations=["failed_locations_value"], + ) + ) + + response = await client.list_app_profiles(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.ListAppProfilesRequest() + + # Establish that the response is the type that we expect. 
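+    # The async list method hands back a pager wrapping the RPC rather than
+    # the raw response; attribute reads such as next_page_token and
+    # failed_locations below are forwarded to the underlying
+    # ListAppProfilesResponse via the pager's __getattr__ delegation.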
+ assert isinstance(response, pagers.ListAppProfilesAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + assert response.failed_locations == ["failed_locations_value"] + + +@pytest.mark.asyncio +async def test_list_app_profiles_async_from_dict(): + await test_list_app_profiles_async(request_type=dict) + + +def test_list_app_profiles_field_headers(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.ListAppProfilesRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_app_profiles), "__call__" + ) as call: + call.return_value = bigtable_instance_admin.ListAppProfilesResponse() + + client.list_app_profiles(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_app_profiles_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.ListAppProfilesRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_app_profiles), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_instance_admin.ListAppProfilesResponse() + ) + + await client.list_app_profiles(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_app_profiles_flattened(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_app_profiles), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_instance_admin.ListAppProfilesResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_app_profiles(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +def test_list_app_profiles_flattened_error(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        client.list_app_profiles(
+            bigtable_instance_admin.ListAppProfilesRequest(), parent="parent_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_app_profiles_flattened_async():
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_app_profiles), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = bigtable_instance_admin.ListAppProfilesResponse()
+
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            bigtable_instance_admin.ListAppProfilesResponse()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_app_profiles(parent="parent_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].parent == "parent_value"
+
+
+@pytest.mark.asyncio
+async def test_list_app_profiles_flattened_error_async():
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_app_profiles(
+            bigtable_instance_admin.ListAppProfilesRequest(), parent="parent_value",
+        )
+
+
+def test_list_app_profiles_pager():
+    client = BigtableInstanceAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_app_profiles), "__call__"
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            bigtable_instance_admin.ListAppProfilesResponse(
+                app_profiles=[
+                    instance.AppProfile(),
+                    instance.AppProfile(),
+                    instance.AppProfile(),
+                ],
+                next_page_token="abc",
+            ),
+            bigtable_instance_admin.ListAppProfilesResponse(
+                app_profiles=[], next_page_token="def",
+            ),
+            bigtable_instance_admin.ListAppProfilesResponse(
+                app_profiles=[instance.AppProfile(),], next_page_token="ghi",
+            ),
+            bigtable_instance_admin.ListAppProfilesResponse(
+                app_profiles=[instance.AppProfile(), instance.AppProfile(),],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
+        )
+        pager = client.list_app_profiles(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, instance.AppProfile) for i in results)
+
+
+def test_list_app_profiles_pages():
+    client = BigtableInstanceAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_app_profiles), "__call__"
+    ) as call:
+        # Set the response to a series of pages.
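+        # mock returns the next side_effect item on each successive call, so
+        # the token-bearing responses model successive pages; the trailing
+        # RuntimeError would surface if the client ever requested a page past
+        # the final (empty-token) response.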
+        call.side_effect = (
+            bigtable_instance_admin.ListAppProfilesResponse(
+                app_profiles=[
+                    instance.AppProfile(),
+                    instance.AppProfile(),
+                    instance.AppProfile(),
+                ],
+                next_page_token="abc",
+            ),
+            bigtable_instance_admin.ListAppProfilesResponse(
+                app_profiles=[], next_page_token="def",
+            ),
+            bigtable_instance_admin.ListAppProfilesResponse(
+                app_profiles=[instance.AppProfile(),], next_page_token="ghi",
+            ),
+            bigtable_instance_admin.ListAppProfilesResponse(
+                app_profiles=[instance.AppProfile(), instance.AppProfile(),],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_app_profiles(request={}).pages)
+        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+            assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_app_profiles_async_pager():
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_app_profiles),
+        "__call__",
+        new_callable=mock.AsyncMock,
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            bigtable_instance_admin.ListAppProfilesResponse(
+                app_profiles=[
+                    instance.AppProfile(),
+                    instance.AppProfile(),
+                    instance.AppProfile(),
+                ],
+                next_page_token="abc",
+            ),
+            bigtable_instance_admin.ListAppProfilesResponse(
+                app_profiles=[], next_page_token="def",
+            ),
+            bigtable_instance_admin.ListAppProfilesResponse(
+                app_profiles=[instance.AppProfile(),], next_page_token="ghi",
+            ),
+            bigtable_instance_admin.ListAppProfilesResponse(
+                app_profiles=[instance.AppProfile(), instance.AppProfile(),],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_app_profiles(request={},)
+        assert async_pager.next_page_token == "abc"
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, instance.AppProfile) for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_app_profiles_async_pages():
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_app_profiles),
+        "__call__",
+        new_callable=mock.AsyncMock,
+    ) as call:
+        # Set the response to a series of pages.
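+        # On the async surface the method call itself must be awaited first;
+        # only then does .pages expose an async iterator, hence the
+        # "async for page_ in (await client.list_app_profiles(...)).pages"
+        # shape used below, with each page_ carrying its raw_page response.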
+ call.side_effect = ( + bigtable_instance_admin.ListAppProfilesResponse( + app_profiles=[ + instance.AppProfile(), + instance.AppProfile(), + instance.AppProfile(), + ], + next_page_token="abc", + ), + bigtable_instance_admin.ListAppProfilesResponse( + app_profiles=[], next_page_token="def", + ), + bigtable_instance_admin.ListAppProfilesResponse( + app_profiles=[instance.AppProfile(),], next_page_token="ghi", + ), + bigtable_instance_admin.ListAppProfilesResponse( + app_profiles=[instance.AppProfile(), instance.AppProfile(),], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_app_profiles(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_update_app_profile( + transport: str = "grpc", + request_type=bigtable_instance_admin.UpdateAppProfileRequest, +): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_app_profile), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.update_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.UpdateAppProfileRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_update_app_profile_from_dict(): + test_update_app_profile(request_type=dict) + + +@pytest.mark.asyncio +async def test_update_app_profile_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.UpdateAppProfileRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_app_profile), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.update_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.UpdateAppProfileRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_update_app_profile_async_from_dict(): + await test_update_app_profile_async(request_type=dict) + + +def test_update_app_profile_field_headers(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
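+    # For a field nested inside a request message, the routing parameter uses
+    # the full dotted path, so the metadata asserted below reads
+    # "app_profile.name=app_profile.name/value" rather than a bare "name=...".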
+ request = bigtable_instance_admin.UpdateAppProfileRequest() + request.app_profile.name = "app_profile.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_app_profile), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.update_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "app_profile.name=app_profile.name/value",) in kw[ + "metadata" + ] + + +@pytest.mark.asyncio +async def test_update_app_profile_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.UpdateAppProfileRequest() + request.app_profile.name = "app_profile.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_app_profile), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.update_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "app_profile.name=app_profile.name/value",) in kw[ + "metadata" + ] + + +def test_update_app_profile_flattened(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_app_profile), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_app_profile( + app_profile=instance.AppProfile(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].app_profile == instance.AppProfile(name="name_value") + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + +def test_update_app_profile_flattened_error(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_app_profile( + bigtable_instance_admin.UpdateAppProfileRequest(), + app_profile=instance.AppProfile(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.asyncio +async def test_update_app_profile_flattened_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.update_app_profile), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_app_profile( + app_profile=instance.AppProfile(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].app_profile == instance.AppProfile(name="name_value") + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + +@pytest.mark.asyncio +async def test_update_app_profile_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_app_profile( + bigtable_instance_admin.UpdateAppProfileRequest(), + app_profile=instance.AppProfile(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + +def test_delete_app_profile( + transport: str = "grpc", + request_type=bigtable_instance_admin.DeleteAppProfileRequest, +): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_app_profile), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.DeleteAppProfileRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_app_profile_from_dict(): + test_delete_app_profile(request_type=dict) + + +@pytest.mark.asyncio +async def test_delete_app_profile_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.DeleteAppProfileRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_app_profile), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + response = await client.delete_app_profile(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.DeleteAppProfileRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_app_profile_async_from_dict(): + await test_delete_app_profile_async(request_type=dict) + + +def test_delete_app_profile_field_headers(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.DeleteAppProfileRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_app_profile), "__call__" + ) as call: + call.return_value = None + + client.delete_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_app_profile_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.DeleteAppProfileRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_app_profile), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + await client.delete_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_app_profile_flattened(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_app_profile), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_app_profile(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_delete_app_profile_flattened_error(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_app_profile( + bigtable_instance_admin.DeleteAppProfileRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_app_profile_flattened_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_app_profile), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_app_profile(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_delete_app_profile_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_app_profile( + bigtable_instance_admin.DeleteAppProfileRequest(), name="name_value", + ) + + +def test_get_iam_policy( + transport: str = "grpc", request_type=iam_policy.GetIamPolicyRequest +): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy.Policy(version=774, etag=b"etag_blob",) + + response = client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.GetIamPolicyRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, policy.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_get_iam_policy_from_dict(): + test_get_iam_policy(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_iam_policy_async( + transport: str = "grpc_asyncio", request_type=iam_policy.GetIamPolicyRequest +): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy.Policy(version=774, etag=b"etag_blob",) + ) + + response = await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.GetIamPolicyRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, policy.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_get_iam_policy_async_from_dict(): + await test_get_iam_policy_async(request_type=dict) + + +def test_get_iam_policy_field_headers(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy.Policy() + + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_iam_policy_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy.Policy()) + + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_get_iam_policy_from_dict_foreign(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy.Policy() + + response = client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + + +def test_get_iam_policy_flattened(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy.Policy() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_iam_policy(resource="resource_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].resource == "resource_value" + + +def test_get_iam_policy_flattened_error(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_iam_policy( + iam_policy.GetIamPolicyRequest(), resource="resource_value", + ) + + +@pytest.mark.asyncio +async def test_get_iam_policy_flattened_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy.Policy() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy.Policy()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_iam_policy(resource="resource_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].resource == "resource_value" + + +@pytest.mark.asyncio +async def test_get_iam_policy_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_iam_policy( + iam_policy.GetIamPolicyRequest(), resource="resource_value", + ) + + +def test_set_iam_policy( + transport: str = "grpc", request_type=iam_policy.SetIamPolicyRequest +): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy.Policy(version=774, etag=b"etag_blob",) + + response = client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.SetIamPolicyRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, policy.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_set_iam_policy_from_dict(): + test_set_iam_policy(request_type=dict) + + +@pytest.mark.asyncio +async def test_set_iam_policy_async( + transport: str = "grpc_asyncio", request_type=iam_policy.SetIamPolicyRequest +): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy.Policy(version=774, etag=b"etag_blob",) + ) + + response = await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.SetIamPolicyRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, policy.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_set_iam_policy_async_from_dict(): + await test_set_iam_policy_async(request_type=dict) + + +def test_set_iam_policy_field_headers(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy.Policy() + + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_set_iam_policy_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy.Policy()) + + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_set_iam_policy_from_dict_foreign(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy.Policy() + + response = client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy.Policy(version=774), + } + ) + call.assert_called() + + +def test_set_iam_policy_flattened(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = policy.Policy()
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.set_iam_policy(resource="resource_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].resource == "resource_value"
+
+
+def test_set_iam_policy_flattened_error():
+    client = BigtableInstanceAdminClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.set_iam_policy(
+            iam_policy.SetIamPolicyRequest(), resource="resource_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_flattened_async():
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy.Policy())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.set_iam_policy(resource="resource_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].resource == "resource_value"
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_flattened_error_async():
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.set_iam_policy(
+            iam_policy.SetIamPolicyRequest(), resource="resource_value",
+        )
+
+
+def test_test_iam_permissions(
+    transport: str = "grpc", request_type=iam_policy.TestIamPermissionsRequest
+):
+    client = BigtableInstanceAdminClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.test_iam_permissions), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = iam_policy.TestIamPermissionsResponse(
+            permissions=["permissions_value"],
+        )
+
+        response = client.test_iam_permissions(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == iam_policy.TestIamPermissionsRequest()
+
+    # Establish that the response is the type that we expect.
+ + assert isinstance(response, iam_policy.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +def test_test_iam_permissions_from_dict(): + test_test_iam_permissions(request_type=dict) + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async( + transport: str = "grpc_asyncio", request_type=iam_policy.TestIamPermissionsRequest +): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy.TestIamPermissionsResponse(permissions=["permissions_value"],) + ) + + response = await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.TestIamPermissionsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async_from_dict(): + await test_test_iam_permissions_async(request_type=dict) + + +def test_test_iam_permissions_field_headers(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy.TestIamPermissionsResponse() + + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy.TestIamPermissionsResponse() + ) + + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
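+    # The routing header travels as gRPC metadata: kw["metadata"] holds the
+    # (key, value) pairs handed to the stub, and x-goog-request-params mirrors
+    # request.resource so the backend can route the call.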
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
+
+
+def test_test_iam_permissions_from_dict_foreign():
+    client = BigtableInstanceAdminClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.test_iam_permissions), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = iam_policy.TestIamPermissionsResponse()
+
+        response = client.test_iam_permissions(
+            request={
+                "resource": "resource_value",
+                "permissions": ["permissions_value"],
+            }
+        )
+        call.assert_called()
+
+
+def test_test_iam_permissions_flattened():
+    client = BigtableInstanceAdminClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.test_iam_permissions), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = iam_policy.TestIamPermissionsResponse()
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.test_iam_permissions(
+            resource="resource_value", permissions=["permissions_value"],
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].resource == "resource_value"
+
+        assert args[0].permissions == ["permissions_value"]
+
+
+def test_test_iam_permissions_flattened_error():
+    client = BigtableInstanceAdminClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.test_iam_permissions(
+            iam_policy.TestIamPermissionsRequest(),
+            resource="resource_value",
+            permissions=["permissions_value"],
+        )
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_flattened_async():
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.test_iam_permissions), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            iam_policy.TestIamPermissionsResponse()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.test_iam_permissions(
+            resource="resource_value", permissions=["permissions_value"],
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].resource == "resource_value"
+
+        assert args[0].permissions == ["permissions_value"]
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_flattened_error_async():
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+ with pytest.raises(ValueError): + await client.test_iam_permissions( + iam_policy.TestIamPermissionsRequest(), + resource="resource_value", + permissions=["permissions_value"], + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.BigtableInstanceAdminGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.BigtableInstanceAdminGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableInstanceAdminClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.BigtableInstanceAdminGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableInstanceAdminClient( + client_options={"scopes": ["1", "2"]}, transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.BigtableInstanceAdminGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + client = BigtableInstanceAdminClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.BigtableInstanceAdminGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.BigtableInstanceAdminGrpcAsyncIOTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.BigtableInstanceAdminGrpcTransport, + transports.BigtableInstanceAdminGrpcAsyncIOTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + assert isinstance(client.transport, transports.BigtableInstanceAdminGrpcTransport,) + + +def test_bigtable_instance_admin_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(exceptions.DuplicateCredentialArgs): + transport = transports.BigtableInstanceAdminTransport( + credentials=credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_bigtable_instance_admin_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.BigtableInstanceAdminTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.BigtableInstanceAdminTransport( + credentials=credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
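+    # The abstract base transport only declares the method surface; the
+    # concrete grpc and grpc_asyncio transports override each entry below.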
+ methods = ( + "create_instance", + "get_instance", + "list_instances", + "update_instance", + "partial_update_instance", + "delete_instance", + "create_cluster", + "get_cluster", + "list_clusters", + "update_cluster", + "delete_cluster", + "create_app_profile", + "get_app_profile", + "list_app_profiles", + "update_app_profile", + "delete_app_profile", + "get_iam_policy", + "set_iam_policy", + "test_iam_permissions", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + +def test_bigtable_instance_admin_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + auth, "load_credentials_from_file" + ) as load_creds, mock.patch( + "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.BigtableInstanceAdminTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.BigtableInstanceAdminTransport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=( + "https://www.googleapis.com/auth/bigtable.admin", + "https://www.googleapis.com/auth/bigtable.admin.cluster", + "https://www.googleapis.com/auth/bigtable.admin.instance", + "https://www.googleapis.com/auth/cloud-bigtable.admin", + "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + quota_project_id="octopus", + ) + + +def test_bigtable_instance_admin_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(auth, "default") as adc, mock.patch( + "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.BigtableInstanceAdminTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.BigtableInstanceAdminTransport() + adc.assert_called_once() + + +def test_bigtable_instance_admin_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + BigtableInstanceAdminClient() + adc.assert_called_once_with( + scopes=( + "https://www.googleapis.com/auth/bigtable.admin", + "https://www.googleapis.com/auth/bigtable.admin.cluster", + "https://www.googleapis.com/auth/bigtable.admin.instance", + "https://www.googleapis.com/auth/cloud-bigtable.admin", + "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + quota_project_id=None, + ) + + +def test_bigtable_instance_admin_transport_auth_adc(): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
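+    # auth.default is patched out, so no real credential lookup happens; the
+    # test only verifies the scopes and quota project forwarded to google.auth.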
+ with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transports.BigtableInstanceAdminGrpcTransport( + host="squid.clam.whelk", quota_project_id="octopus" + ) + adc.assert_called_once_with( + scopes=( + "https://www.googleapis.com/auth/bigtable.admin", + "https://www.googleapis.com/auth/bigtable.admin.cluster", + "https://www.googleapis.com/auth/bigtable.admin.instance", + "https://www.googleapis.com/auth/cloud-bigtable.admin", + "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.BigtableInstanceAdminGrpcTransport, + transports.BigtableInstanceAdminGrpcAsyncIOTransport, + ], +) +def test_bigtable_instance_admin_grpc_transport_client_cert_source_for_mtls( + transport_class, +): + cred = credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=( + "https://www.googleapis.com/auth/bigtable.admin", + "https://www.googleapis.com/auth/bigtable.admin.cluster", + "https://www.googleapis.com/auth/bigtable.admin.instance", + "https://www.googleapis.com/auth/cloud-bigtable.admin", + "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback, + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, private_key=expected_key + ) + + +def test_bigtable_instance_admin_host_no_port(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="bigtableadmin.googleapis.com" + ), + ) + assert client.transport._host == "bigtableadmin.googleapis.com:443" + + +def test_bigtable_instance_admin_host_with_port(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="bigtableadmin.googleapis.com:8000" + ), + ) + assert client.transport._host == "bigtableadmin.googleapis.com:8000" + + +def test_bigtable_instance_admin_grpc_transport_channel(): + channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. 
+    transport = transports.BigtableInstanceAdminGrpcTransport(
+        host="squid.clam.whelk", channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+def test_bigtable_instance_admin_grpc_asyncio_transport_channel():
+    channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.BigtableInstanceAdminGrpcAsyncIOTransport(
+        host="squid.clam.whelk", channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize(
+    "transport_class",
+    [
+        transports.BigtableInstanceAdminGrpcTransport,
+        transports.BigtableInstanceAdminGrpcAsyncIOTransport,
+    ],
+)
+def test_bigtable_instance_admin_transport_channel_mtls_with_client_cert_source(
+    transport_class,
+):
+    with mock.patch(
+        "grpc.ssl_channel_credentials", autospec=True
+    ) as grpc_ssl_channel_cred:
+        with mock.patch.object(
+            transport_class, "create_channel"
+        ) as grpc_create_channel:
+            mock_ssl_cred = mock.Mock()
+            grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+            mock_grpc_channel = mock.Mock()
+            grpc_create_channel.return_value = mock_grpc_channel
+
+            cred = credentials.AnonymousCredentials()
+            with pytest.warns(DeprecationWarning):
+                with mock.patch.object(auth, "default") as adc:
+                    adc.return_value = (cred, None)
+                    transport = transport_class(
+                        host="squid.clam.whelk",
+                        api_mtls_endpoint="mtls.squid.clam.whelk",
+                        client_cert_source=client_cert_source_callback,
+                    )
+                    adc.assert_called_once()
+
+            grpc_ssl_channel_cred.assert_called_once_with(
+                certificate_chain=b"cert bytes", private_key=b"key bytes"
+            )
+            grpc_create_channel.assert_called_once_with(
+                "mtls.squid.clam.whelk:443",
+                credentials=cred,
+                credentials_file=None,
+                scopes=(
+                    "https://www.googleapis.com/auth/bigtable.admin",
+                    "https://www.googleapis.com/auth/bigtable.admin.cluster",
+                    "https://www.googleapis.com/auth/bigtable.admin.instance",
+                    "https://www.googleapis.com/auth/cloud-bigtable.admin",
+                    "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster",
+                    "https://www.googleapis.com/auth/cloud-platform",
+                    "https://www.googleapis.com/auth/cloud-platform.read-only",
+                ),
+                ssl_credentials=mock_ssl_cred,
+                quota_project_id=None,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+            assert transport.grpc_channel == mock_grpc_channel
+            assert transport._ssl_channel_credentials == mock_ssl_cred
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
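+# (api_mtls_endpoint and client_cert_source only emit a DeprecationWarning for
+# now; ssl_channel_credentials and client_cert_source_for_mtls, exercised in the
+# test above, are the supported replacements.)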
+@pytest.mark.parametrize(
+    "transport_class",
+    [
+        transports.BigtableInstanceAdminGrpcTransport,
+        transports.BigtableInstanceAdminGrpcAsyncIOTransport,
+    ],
+)
+def test_bigtable_instance_admin_transport_channel_mtls_with_adc(transport_class):
+    mock_ssl_cred = mock.Mock()
+    with mock.patch.multiple(
+        "google.auth.transport.grpc.SslCredentials",
+        __init__=mock.Mock(return_value=None),
+        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
+    ):
+        with mock.patch.object(
+            transport_class, "create_channel"
+        ) as grpc_create_channel:
+            mock_grpc_channel = mock.Mock()
+            grpc_create_channel.return_value = mock_grpc_channel
+            mock_cred = mock.Mock()
+
+            with pytest.warns(DeprecationWarning):
+                transport = transport_class(
+                    host="squid.clam.whelk",
+                    credentials=mock_cred,
+                    api_mtls_endpoint="mtls.squid.clam.whelk",
+                    client_cert_source=None,
+                )
+
+            grpc_create_channel.assert_called_once_with(
+                "mtls.squid.clam.whelk:443",
+                credentials=mock_cred,
+                credentials_file=None,
+                scopes=(
+                    "https://www.googleapis.com/auth/bigtable.admin",
+                    "https://www.googleapis.com/auth/bigtable.admin.cluster",
+                    "https://www.googleapis.com/auth/bigtable.admin.instance",
+                    "https://www.googleapis.com/auth/cloud-bigtable.admin",
+                    "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster",
+                    "https://www.googleapis.com/auth/cloud-platform",
+                    "https://www.googleapis.com/auth/cloud-platform.read-only",
+                ),
+                ssl_credentials=mock_ssl_cred,
+                quota_project_id=None,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+            assert transport.grpc_channel == mock_grpc_channel
+
+
+def test_bigtable_instance_admin_grpc_lro_client():
+    client = BigtableInstanceAdminClient(
+        credentials=credentials.AnonymousCredentials(), transport="grpc",
+    )
+    transport = client.transport
+
+    # Ensure that we have an api-core operations client.
+    assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
+
+    # Ensure that subsequent calls to the property return the exact same object.
+    assert transport.operations_client is transport.operations_client
+
+
+def test_bigtable_instance_admin_grpc_lro_async_client():
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(), transport="grpc_asyncio",
+    )
+    transport = client.transport
+
+    # Ensure that we have an api-core operations client.
+    assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
+
+    # Ensure that subsequent calls to the property return the exact same object.
+    assert transport.operations_client is transport.operations_client
+
+
+def test_app_profile_path():
+    project = "squid"
+    instance = "clam"
+    app_profile = "whelk"
+
+    expected = "projects/{project}/instances/{instance}/appProfiles/{app_profile}".format(
+        project=project, instance=instance, app_profile=app_profile,
+    )
+    actual = BigtableInstanceAdminClient.app_profile_path(
+        project, instance, app_profile
+    )
+    assert expected == actual
+
+
+def test_parse_app_profile_path():
+    expected = {
+        "project": "octopus",
+        "instance": "oyster",
+        "app_profile": "nudibranch",
+    }
+    path = BigtableInstanceAdminClient.app_profile_path(**expected)
+
+    # Check that the path construction is reversible.
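+    # parse_app_profile_path applies the template in reverse (a regex match)
+    # and hands the captured segments back as a dict.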
+ actual = BigtableInstanceAdminClient.parse_app_profile_path(path) + assert expected == actual + + +def test_cluster_path(): + project = "cuttlefish" + instance = "mussel" + cluster = "winkle" + + expected = "projects/{project}/instances/{instance}/clusters/{cluster}".format( + project=project, instance=instance, cluster=cluster, + ) + actual = BigtableInstanceAdminClient.cluster_path(project, instance, cluster) + assert expected == actual + + +def test_parse_cluster_path(): + expected = { + "project": "nautilus", + "instance": "scallop", + "cluster": "abalone", + } + path = BigtableInstanceAdminClient.cluster_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableInstanceAdminClient.parse_cluster_path(path) + assert expected == actual + + +def test_instance_path(): + project = "squid" + instance = "clam" + + expected = "projects/{project}/instances/{instance}".format( + project=project, instance=instance, + ) + actual = BigtableInstanceAdminClient.instance_path(project, instance) + assert expected == actual + + +def test_parse_instance_path(): + expected = { + "project": "whelk", + "instance": "octopus", + } + path = BigtableInstanceAdminClient.instance_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableInstanceAdminClient.parse_instance_path(path) + assert expected == actual + + +def test_common_billing_account_path(): + billing_account = "oyster" + + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = BigtableInstanceAdminClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "nudibranch", + } + path = BigtableInstanceAdminClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableInstanceAdminClient.parse_common_billing_account_path(path) + assert expected == actual + + +def test_common_folder_path(): + folder = "cuttlefish" + + expected = "folders/{folder}".format(folder=folder,) + actual = BigtableInstanceAdminClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "mussel", + } + path = BigtableInstanceAdminClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableInstanceAdminClient.parse_common_folder_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "winkle" + + expected = "organizations/{organization}".format(organization=organization,) + actual = BigtableInstanceAdminClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nautilus", + } + path = BigtableInstanceAdminClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableInstanceAdminClient.parse_common_organization_path(path) + assert expected == actual + + +def test_common_project_path(): + project = "scallop" + + expected = "projects/{project}".format(project=project,) + actual = BigtableInstanceAdminClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "abalone", + } + path = BigtableInstanceAdminClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = BigtableInstanceAdminClient.parse_common_project_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "squid" + location = "clam" + + expected = "projects/{project}/locations/{location}".format( + project=project, location=location, + ) + actual = BigtableInstanceAdminClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "whelk", + "location": "octopus", + } + path = BigtableInstanceAdminClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableInstanceAdminClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.BigtableInstanceAdminTransport, "_prep_wrapped_messages" + ) as prep: + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.BigtableInstanceAdminTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = BigtableInstanceAdminClient.get_transport_class() + transport = transport_class( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py new file mode 100644 index 000000000..92bdb8718 --- /dev/null +++ b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -0,0 +1,6067 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from google import auth +from google.api_core import client_options +from google.api_core import exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.auth import credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + BigtableTableAdminAsyncClient, +) +from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + BigtableTableAdminClient, +) +from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import pagers +from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import transports +from google.cloud.bigtable_admin_v2.types import bigtable_table_admin +from google.cloud.bigtable_admin_v2.types import table +from google.cloud.bigtable_admin_v2.types import table as gba_table +from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore +from google.iam.v1 import options_pb2 as options # type: ignore +from google.iam.v1 import policy_pb2 as policy # type: ignore +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import duration_pb2 as duration # type: ignore +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore +from google.type import expr_pb2 as expr # type: ignore + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
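+# The modified value is swapped in with mock.patch.object on DEFAULT_ENDPOINT in
+# the parametrized client-option tests below.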
+def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert BigtableTableAdminClient._get_default_mtls_endpoint(None) is None + assert ( + BigtableTableAdminClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + BigtableTableAdminClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + BigtableTableAdminClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + BigtableTableAdminClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + BigtableTableAdminClient._get_default_mtls_endpoint(non_googleapi) + == non_googleapi + ) + + +def test_bigtable_table_admin_client_from_service_account_info(): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = BigtableTableAdminClient.from_service_account_info(info) + assert client.transport._credentials == creds + + assert client.transport._host == "bigtableadmin.googleapis.com:443" + + +@pytest.mark.parametrize( + "client_class", [BigtableTableAdminClient, BigtableTableAdminAsyncClient,] +) +def test_bigtable_table_admin_client_from_service_account_file(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + + assert client.transport._host == "bigtableadmin.googleapis.com:443" + + +def test_bigtable_table_admin_client_get_transport_class(): + transport = BigtableTableAdminClient.get_transport_class() + available_transports = [ + transports.BigtableTableAdminGrpcTransport, + ] + assert transport in available_transports + + transport = BigtableTableAdminClient.get_transport_class("grpc") + assert transport == transports.BigtableTableAdminGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (BigtableTableAdminClient, transports.BigtableTableAdminGrpcTransport, "grpc"), + ( + BigtableTableAdminAsyncClient, + transports.BigtableTableAdminGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +@mock.patch.object( + BigtableTableAdminClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(BigtableTableAdminClient), +) +@mock.patch.object( + BigtableTableAdminAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(BigtableTableAdminAsyncClient), +) +def test_bigtable_table_admin_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. 
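+    # A ready-made transport instance short-circuits transport construction,
+    # hence the gtc.assert_not_called() below.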
+ with mock.patch.object(BigtableTableAdminClient, "get_transport_class") as gtc: + transport = transport_class(credentials=credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(BigtableTableAdminClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
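+    # Only the strings "true" and "false" are accepted for this variable;
+    # anything else makes client construction raise ValueError.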
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + ( + BigtableTableAdminClient, + transports.BigtableTableAdminGrpcTransport, + "grpc", + "true", + ), + ( + BigtableTableAdminAsyncClient, + transports.BigtableTableAdminGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + ( + BigtableTableAdminClient, + transports.BigtableTableAdminGrpcTransport, + "grpc", + "false", + ), + ( + BigtableTableAdminAsyncClient, + transports.BigtableTableAdminGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ], +) +@mock.patch.object( + BigtableTableAdminClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(BigtableTableAdminClient), +) +@mock.patch.object( + BigtableTableAdminAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(BigtableTableAdminAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_bigtable_table_admin_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (BigtableTableAdminClient, transports.BigtableTableAdminGrpcTransport, "grpc"), + ( + BigtableTableAdminAsyncClient, + transports.BigtableTableAdminGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_bigtable_table_admin_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. + options = client_options.ClientOptions(scopes=["1", "2"],) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (BigtableTableAdminClient, transports.BigtableTableAdminGrpcTransport, "grpc"), + ( + BigtableTableAdminAsyncClient, + transports.BigtableTableAdminGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_bigtable_table_admin_client_client_options_credentials_file( + client_class, transport_class, transport_name +): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions(credentials_file="credentials.json") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_bigtable_table_admin_client_client_options_from_dict(): + with mock.patch( + "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.BigtableTableAdminGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = BigtableTableAdminClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_create_table( + transport: str = "grpc", request_type=bigtable_table_admin.CreateTableRequest +): + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gba_table.Table( + name="name_value", granularity=gba_table.Table.TimestampGranularity.MILLIS, + ) + + response = client.create_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.CreateTableRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, gba_table.Table) + + assert response.name == "name_value" + + assert response.granularity == gba_table.Table.TimestampGranularity.MILLIS + + +def test_create_table_from_dict(): + test_create_table(request_type=dict) + + +@pytest.mark.asyncio +async def test_create_table_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.CreateTableRequest, +): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gba_table.Table( + name="name_value", + granularity=gba_table.Table.TimestampGranularity.MILLIS, + ) + ) + + response = await client.create_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.CreateTableRequest() + + # Establish that the response is the type that we expect. 
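+    # gba_table aliases the same types.table module imported at the top of this
+    # file; the generator appears to use the alias wherever a response type
+    # could clash with another import of the same name.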
+ assert isinstance(response, gba_table.Table) + + assert response.name == "name_value" + + assert response.granularity == gba_table.Table.TimestampGranularity.MILLIS + + +@pytest.mark.asyncio +async def test_create_table_async_from_dict(): + await test_create_table_async(request_type=dict) + + +def test_create_table_field_headers(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.CreateTableRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_table), "__call__") as call: + call.return_value = gba_table.Table() + + client.create_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_table_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.CreateTableRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_table), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gba_table.Table()) + + await client.create_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_table_flattened(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gba_table.Table() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_table( + parent="parent_value", + table_id="table_id_value", + table=gba_table.Table(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].table_id == "table_id_value" + + assert args[0].table == gba_table.Table(name="name_value") + + +def test_create_table_flattened_error(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        client.create_table(
+            bigtable_table_admin.CreateTableRequest(),
+            parent="parent_value",
+            table_id="table_id_value",
+            table=gba_table.Table(name="name_value"),
+        )
+
+
+@pytest.mark.asyncio
+async def test_create_table_flattened_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.create_table), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gba_table.Table())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.create_table(
+            parent="parent_value",
+            table_id="table_id_value",
+            table=gba_table.Table(name="name_value"),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].parent == "parent_value"
+
+        assert args[0].table_id == "table_id_value"
+
+        assert args[0].table == gba_table.Table(name="name_value")
+
+
+@pytest.mark.asyncio
+async def test_create_table_flattened_error_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.create_table(
+            bigtable_table_admin.CreateTableRequest(),
+            parent="parent_value",
+            table_id="table_id_value",
+            table=gba_table.Table(name="name_value"),
+        )
+
+
+def test_create_table_from_snapshot(
+    transport: str = "grpc",
+    request_type=bigtable_table_admin.CreateTableFromSnapshotRequest,
+):
+    client = BigtableTableAdminClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.create_table_from_snapshot), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name="operations/spam")
+
+        response = client.create_table_from_snapshot(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == bigtable_table_admin.CreateTableFromSnapshotRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, future.Future)
+
+
+def test_create_table_from_snapshot_from_dict():
+    test_create_table_from_snapshot(request_type=dict)
+
+
+@pytest.mark.asyncio
+async def test_create_table_from_snapshot_async(
+    transport: str = "grpc_asyncio",
+    request_type=bigtable_table_admin.CreateTableFromSnapshotRequest,
+):
+    client = BigtableTableAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
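+    # CreateTableFromSnapshot is a long-running operation: the transport
+    # returns a raw operations_pb2.Operation, which the client wraps in an
+    # api-core future (hence the future.Future assertion below).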
+ with mock.patch.object( + type(client.transport.create_table_from_snapshot), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.create_table_from_snapshot(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.CreateTableFromSnapshotRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_table_from_snapshot_async_from_dict(): + await test_create_table_from_snapshot_async(request_type=dict) + + +def test_create_table_from_snapshot_field_headers(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.CreateTableFromSnapshotRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_table_from_snapshot), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.create_table_from_snapshot(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_table_from_snapshot_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.CreateTableFromSnapshotRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_table_from_snapshot), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.create_table_from_snapshot(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_table_from_snapshot_flattened(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_table_from_snapshot), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+        client.create_table_from_snapshot(
+            parent="parent_value",
+            table_id="table_id_value",
+            source_snapshot="source_snapshot_value",
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].parent == "parent_value"
+
+        assert args[0].table_id == "table_id_value"
+
+        assert args[0].source_snapshot == "source_snapshot_value"
+
+
+def test_create_table_from_snapshot_flattened_error():
+    client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.create_table_from_snapshot(
+            bigtable_table_admin.CreateTableFromSnapshotRequest(),
+            parent="parent_value",
+            table_id="table_id_value",
+            source_snapshot="source_snapshot_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_create_table_from_snapshot_flattened_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.create_table_from_snapshot), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name="operations/spam")
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.create_table_from_snapshot(
+            parent="parent_value",
+            table_id="table_id_value",
+            source_snapshot="source_snapshot_value",
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].parent == "parent_value"
+
+        assert args[0].table_id == "table_id_value"
+
+        assert args[0].source_snapshot == "source_snapshot_value"
+
+
+@pytest.mark.asyncio
+async def test_create_table_from_snapshot_flattened_error_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.create_table_from_snapshot(
+            bigtable_table_admin.CreateTableFromSnapshotRequest(),
+            parent="parent_value",
+            table_id="table_id_value",
+            source_snapshot="source_snapshot_value",
+        )
+
+
+def test_list_tables(
+    transport: str = "grpc", request_type=bigtable_table_admin.ListTablesRequest
+):
+    client = BigtableTableAdminClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_tables), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = bigtable_table_admin.ListTablesResponse(
+            next_page_token="next_page_token_value",
+        )
+
+        response = client.list_tables(request)
+
+        # Establish that the underlying gRPC stub method was called.
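+        # mock_calls entries are (name, args, kwargs) triples; the unpacking
+        # below keeps only the positional args, whose first element is the
+        # request proto that the client actually sent to the stub.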
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.ListTablesRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, pagers.ListTablesPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_tables_from_dict(): + test_list_tables(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_tables_async( + transport: str = "grpc_asyncio", request_type=bigtable_table_admin.ListTablesRequest +): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_tables), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.ListTablesResponse( + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_tables(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.ListTablesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListTablesAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_tables_async_from_dict(): + await test_list_tables_async(request_type=dict) + + +def test_list_tables_field_headers(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.ListTablesRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_tables), "__call__") as call: + call.return_value = bigtable_table_admin.ListTablesResponse() + + client.list_tables(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_tables_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.ListTablesRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_tables), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.ListTablesResponse() + ) + + await client.list_tables(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
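+    # The x-goog-request-params metadata entry mirrors URI path fields so the
+    # backend can route the request; the assertion pins the exact
+    # "parent=parent/value" pair taken from the request above.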
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
+
+
+def test_list_tables_flattened():
+    client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_tables), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = bigtable_table_admin.ListTablesResponse()
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.list_tables(parent="parent_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].parent == "parent_value"
+
+
+def test_list_tables_flattened_error():
+    client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_tables(
+            bigtable_table_admin.ListTablesRequest(), parent="parent_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_tables_flattened_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_tables), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            bigtable_table_admin.ListTablesResponse()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_tables(parent="parent_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].parent == "parent_value"
+
+
+@pytest.mark.asyncio
+async def test_list_tables_flattened_error_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_tables(
+            bigtable_table_admin.ListTablesRequest(), parent="parent_value",
+        )
+
+
+def test_list_tables_pager():
+    client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_tables), "__call__") as call:
+        # Set the response to a series of pages.
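+        # side_effect yields one response per stub invocation; the trailing
+        # RuntimeError is a sentinel that would surface if the pager ever
+        # requested a page beyond the final response with its empty token.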
+        call.side_effect = (
+            bigtable_table_admin.ListTablesResponse(
+                tables=[table.Table(), table.Table(), table.Table(),],
+                next_page_token="abc",
+            ),
+            bigtable_table_admin.ListTablesResponse(tables=[], next_page_token="def",),
+            bigtable_table_admin.ListTablesResponse(
+                tables=[table.Table(),], next_page_token="ghi",
+            ),
+            bigtable_table_admin.ListTablesResponse(
+                tables=[table.Table(), table.Table(),],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
+        )
+        pager = client.list_tables(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, table.Table) for i in results)
+
+
+def test_list_tables_pages():
+    client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_tables), "__call__") as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            bigtable_table_admin.ListTablesResponse(
+                tables=[table.Table(), table.Table(), table.Table(),],
+                next_page_token="abc",
+            ),
+            bigtable_table_admin.ListTablesResponse(tables=[], next_page_token="def",),
+            bigtable_table_admin.ListTablesResponse(
+                tables=[table.Table(),], next_page_token="ghi",
+            ),
+            bigtable_table_admin.ListTablesResponse(
+                tables=[table.Table(), table.Table(),],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_tables(request={}).pages)
+        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+            assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_tables_async_pager():
+    client = BigtableTableAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_tables), "__call__", new_callable=mock.AsyncMock
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            bigtable_table_admin.ListTablesResponse(
+                tables=[table.Table(), table.Table(), table.Table(),],
+                next_page_token="abc",
+            ),
+            bigtable_table_admin.ListTablesResponse(tables=[], next_page_token="def",),
+            bigtable_table_admin.ListTablesResponse(
+                tables=[table.Table(),], next_page_token="ghi",
+            ),
+            bigtable_table_admin.ListTablesResponse(
+                tables=[table.Table(), table.Table(),],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_tables(request={},)
+        assert async_pager.next_page_token == "abc"
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, table.Table) for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_tables_async_pages():
+    client = BigtableTableAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_tables), "__call__", new_callable=mock.AsyncMock
+    ) as call:
+        # Set the response to a series of pages.
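+        # The async paginator exposes `.pages` as an async iterator, which the
+        # `async for` loop below consumes after awaiting the initial call.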
+ call.side_effect = ( + bigtable_table_admin.ListTablesResponse( + tables=[table.Table(), table.Table(), table.Table(),], + next_page_token="abc", + ), + bigtable_table_admin.ListTablesResponse(tables=[], next_page_token="def",), + bigtable_table_admin.ListTablesResponse( + tables=[table.Table(),], next_page_token="ghi", + ), + bigtable_table_admin.ListTablesResponse( + tables=[table.Table(), table.Table(),], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_tables(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_get_table( + transport: str = "grpc", request_type=bigtable_table_admin.GetTableRequest +): + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = table.Table( + name="name_value", granularity=table.Table.TimestampGranularity.MILLIS, + ) + + response = client.get_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.GetTableRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, table.Table) + + assert response.name == "name_value" + + assert response.granularity == table.Table.TimestampGranularity.MILLIS + + +def test_get_table_from_dict(): + test_get_table(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_table_async( + transport: str = "grpc_asyncio", request_type=bigtable_table_admin.GetTableRequest +): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + table.Table( + name="name_value", granularity=table.Table.TimestampGranularity.MILLIS, + ) + ) + + response = await client.get_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.GetTableRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, table.Table) + + assert response.name == "name_value" + + assert response.granularity == table.Table.TimestampGranularity.MILLIS + + +@pytest.mark.asyncio +async def test_get_table_async_from_dict(): + await test_get_table_async(request_type=dict) + + +def test_get_table_field_headers(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+    request = bigtable_table_admin.GetTableRequest()
+    request.name = "name/value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_table), "__call__") as call:
+        call.return_value = table.Table()
+
+        client.get_table(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_table_field_headers_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable_table_admin.GetTableRequest()
+    request.name = "name/value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_table), "__call__") as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Table())
+
+        await client.get_table(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
+
+
+def test_get_table_flattened():
+    client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_table), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = table.Table()
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.get_table(name="name_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].name == "name_value"
+
+
+def test_get_table_flattened_error():
+    client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_table(
+            bigtable_table_admin.GetTableRequest(), name="name_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_get_table_flattened_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_table), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Table())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_table(name="name_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
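+        # As elsewhere in the async variants, only a truthy call count is
+        # asserted (rather than exactly one, as in the sync tests), a looser
+        # check that tolerates the async mock's bookkeeping.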
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_table_flattened_error_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_table( + bigtable_table_admin.GetTableRequest(), name="name_value", + ) + + +def test_delete_table( + transport: str = "grpc", request_type=bigtable_table_admin.DeleteTableRequest +): + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.DeleteTableRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_table_from_dict(): + test_delete_table(request_type=dict) + + +@pytest.mark.asyncio +async def test_delete_table_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.DeleteTableRequest, +): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + response = await client.delete_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.DeleteTableRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_table_async_from_dict(): + await test_delete_table_async(request_type=dict) + + +def test_delete_table_field_headers(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.DeleteTableRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_table), "__call__") as call: + call.return_value = None + + client.delete_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_delete_table_field_headers_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable_table_admin.DeleteTableRequest()
+    request.name = "name/value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_table), "__call__") as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+
+        await client.delete_table(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
+
+
+def test_delete_table_flattened():
+    client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_table), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.delete_table(name="name_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].name == "name_value"
+
+
+def test_delete_table_flattened_error():
+    client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.delete_table(
+            bigtable_table_admin.DeleteTableRequest(), name="name_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_delete_table_flattened_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_table), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.delete_table(name="name_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].name == "name_value"
+
+
+@pytest.mark.asyncio
+async def test_delete_table_flattened_error_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
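+    # The ValueError is raised client-side, before any transport call is
+    # attempted; note that the stub is not even mocked here.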
+ with pytest.raises(ValueError): + await client.delete_table( + bigtable_table_admin.DeleteTableRequest(), name="name_value", + ) + + +def test_modify_column_families( + transport: str = "grpc", + request_type=bigtable_table_admin.ModifyColumnFamiliesRequest, +): + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.modify_column_families), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = table.Table( + name="name_value", granularity=table.Table.TimestampGranularity.MILLIS, + ) + + response = client.modify_column_families(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.ModifyColumnFamiliesRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, table.Table) + + assert response.name == "name_value" + + assert response.granularity == table.Table.TimestampGranularity.MILLIS + + +def test_modify_column_families_from_dict(): + test_modify_column_families(request_type=dict) + + +@pytest.mark.asyncio +async def test_modify_column_families_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.ModifyColumnFamiliesRequest, +): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.modify_column_families), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + table.Table( + name="name_value", granularity=table.Table.TimestampGranularity.MILLIS, + ) + ) + + response = await client.modify_column_families(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.ModifyColumnFamiliesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, table.Table) + + assert response.name == "name_value" + + assert response.granularity == table.Table.TimestampGranularity.MILLIS + + +@pytest.mark.asyncio +async def test_modify_column_families_async_from_dict(): + await test_modify_column_families_async(request_type=dict) + + +def test_modify_column_families_field_headers(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.ModifyColumnFamiliesRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
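+    # client.transport.modify_column_families is the transport's wrapped gRPC
+    # callable; patching __call__ on its type intercepts the invocation, so
+    # none of these tests touch the network.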
+ with mock.patch.object( + type(client.transport.modify_column_families), "__call__" + ) as call: + call.return_value = table.Table() + + client.modify_column_families(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_modify_column_families_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.ModifyColumnFamiliesRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.modify_column_families), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Table()) + + await client.modify_column_families(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_modify_column_families_flattened(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.modify_column_families), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = table.Table() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.modify_column_families( + name="name_value", + modifications=[ + bigtable_table_admin.ModifyColumnFamiliesRequest.Modification( + id="id_value" + ) + ], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + assert args[0].modifications == [ + bigtable_table_admin.ModifyColumnFamiliesRequest.Modification(id="id_value") + ] + + +def test_modify_column_families_flattened_error(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.modify_column_families( + bigtable_table_admin.ModifyColumnFamiliesRequest(), + name="name_value", + modifications=[ + bigtable_table_admin.ModifyColumnFamiliesRequest.Modification( + id="id_value" + ) + ], + ) + + +@pytest.mark.asyncio +async def test_modify_column_families_flattened_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.modify_column_families), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Table())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.modify_column_families(
+            name="name_value",
+            modifications=[
+                bigtable_table_admin.ModifyColumnFamiliesRequest.Modification(
+                    id="id_value"
+                )
+            ],
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].name == "name_value"
+
+        assert args[0].modifications == [
+            bigtable_table_admin.ModifyColumnFamiliesRequest.Modification(id="id_value")
+        ]
+
+
+@pytest.mark.asyncio
+async def test_modify_column_families_flattened_error_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.modify_column_families(
+            bigtable_table_admin.ModifyColumnFamiliesRequest(),
+            name="name_value",
+            modifications=[
+                bigtable_table_admin.ModifyColumnFamiliesRequest.Modification(
+                    id="id_value"
+                )
+            ],
+        )
+
+
+def test_drop_row_range(
+    transport: str = "grpc", request_type=bigtable_table_admin.DropRowRangeRequest
+):
+    client = BigtableTableAdminClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+
+        response = client.drop_row_range(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == bigtable_table_admin.DropRowRangeRequest()
+
+    # Establish that the response is the type that we expect.
+    assert response is None
+
+
+def test_drop_row_range_from_dict():
+    test_drop_row_range(request_type=dict)
+
+
+@pytest.mark.asyncio
+async def test_drop_row_range_async(
+    transport: str = "grpc_asyncio",
+    request_type=bigtable_table_admin.DropRowRangeRequest,
+):
+    client = BigtableTableAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+
+        response = await client.drop_row_range(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == bigtable_table_admin.DropRowRangeRequest()
+
+    # Establish that the response is the type that we expect.
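+    # The stub was mocked to return None, and the test pins None as the
+    # client-visible result; the RPC's actual proto response is
+    # google.protobuf.Empty, which the generated surface does not return.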
+ assert response is None + + +@pytest.mark.asyncio +async def test_drop_row_range_async_from_dict(): + await test_drop_row_range_async(request_type=dict) + + +def test_drop_row_range_field_headers(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.DropRowRangeRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: + call.return_value = None + + client.drop_row_range(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_drop_row_range_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.DropRowRangeRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + await client.drop_row_range(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_generate_consistency_token( + transport: str = "grpc", + request_type=bigtable_table_admin.GenerateConsistencyTokenRequest, +): + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.generate_consistency_token), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse( + consistency_token="consistency_token_value", + ) + + response = client.generate_consistency_token(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.GenerateConsistencyTokenRequest() + + # Establish that the response is the type that we expect. 
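+    # Workflow note: the token asserted below is the input to the
+    # check_consistency tests that follow; callers generate a token once, then
+    # poll check_consistency until `consistent` is True.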
+ + assert isinstance(response, bigtable_table_admin.GenerateConsistencyTokenResponse) + + assert response.consistency_token == "consistency_token_value" + + +def test_generate_consistency_token_from_dict(): + test_generate_consistency_token(request_type=dict) + + +@pytest.mark.asyncio +async def test_generate_consistency_token_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.GenerateConsistencyTokenRequest, +): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.generate_consistency_token), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.GenerateConsistencyTokenResponse( + consistency_token="consistency_token_value", + ) + ) + + response = await client.generate_consistency_token(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.GenerateConsistencyTokenRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable_table_admin.GenerateConsistencyTokenResponse) + + assert response.consistency_token == "consistency_token_value" + + +@pytest.mark.asyncio +async def test_generate_consistency_token_async_from_dict(): + await test_generate_consistency_token_async(request_type=dict) + + +def test_generate_consistency_token_field_headers(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.GenerateConsistencyTokenRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.generate_consistency_token), "__call__" + ) as call: + call.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() + + client.generate_consistency_token(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_generate_consistency_token_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.GenerateConsistencyTokenRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(
+        type(client.transport.generate_consistency_token), "__call__"
+    ) as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            bigtable_table_admin.GenerateConsistencyTokenResponse()
+        )
+
+        await client.generate_consistency_token(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
+
+
+def test_generate_consistency_token_flattened():
+    client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.generate_consistency_token), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse()
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.generate_consistency_token(name="name_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].name == "name_value"
+
+
+def test_generate_consistency_token_flattened_error():
+    client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.generate_consistency_token(
+            bigtable_table_admin.GenerateConsistencyTokenRequest(), name="name_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_generate_consistency_token_flattened_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.generate_consistency_token), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            bigtable_table_admin.GenerateConsistencyTokenResponse()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.generate_consistency_token(name="name_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].name == "name_value"
+
+
+@pytest.mark.asyncio
+async def test_generate_consistency_token_flattened_error_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+ with pytest.raises(ValueError): + await client.generate_consistency_token( + bigtable_table_admin.GenerateConsistencyTokenRequest(), name="name_value", + ) + + +def test_check_consistency( + transport: str = "grpc", request_type=bigtable_table_admin.CheckConsistencyRequest +): + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.check_consistency), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_table_admin.CheckConsistencyResponse( + consistent=True, + ) + + response = client.check_consistency(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.CheckConsistencyRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, bigtable_table_admin.CheckConsistencyResponse) + + assert response.consistent is True + + +def test_check_consistency_from_dict(): + test_check_consistency(request_type=dict) + + +@pytest.mark.asyncio +async def test_check_consistency_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.CheckConsistencyRequest, +): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.check_consistency), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.CheckConsistencyResponse(consistent=True,) + ) + + response = await client.check_consistency(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.CheckConsistencyRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable_table_admin.CheckConsistencyResponse) + + assert response.consistent is True + + +@pytest.mark.asyncio +async def test_check_consistency_async_from_dict(): + await test_check_consistency_async(request_type=dict) + + +def test_check_consistency_field_headers(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.CheckConsistencyRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.check_consistency), "__call__" + ) as call: + call.return_value = bigtable_table_admin.CheckConsistencyResponse() + + client.check_consistency(request) + + # Establish that the underlying gRPC stub method was called. 
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_check_consistency_field_headers_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable_table_admin.CheckConsistencyRequest()
+    request.name = "name/value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.check_consistency), "__call__"
+    ) as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            bigtable_table_admin.CheckConsistencyResponse()
+        )
+
+        await client.check_consistency(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
+
+
+def test_check_consistency_flattened():
+    client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.check_consistency), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = bigtable_table_admin.CheckConsistencyResponse()
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.check_consistency(
+            name="name_value", consistency_token="consistency_token_value",
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].name == "name_value"
+
+        assert args[0].consistency_token == "consistency_token_value"
+
+
+def test_check_consistency_flattened_error():
+    client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.check_consistency(
+            bigtable_table_admin.CheckConsistencyRequest(),
+            name="name_value",
+            consistency_token="consistency_token_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_check_consistency_flattened_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.check_consistency), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            bigtable_table_admin.CheckConsistencyResponse()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.check_consistency(
+            name="name_value", consistency_token="consistency_token_value",
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + assert args[0].consistency_token == "consistency_token_value" + + +@pytest.mark.asyncio +async def test_check_consistency_flattened_error_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.check_consistency( + bigtable_table_admin.CheckConsistencyRequest(), + name="name_value", + consistency_token="consistency_token_value", + ) + + +def test_snapshot_table( + transport: str = "grpc", request_type=bigtable_table_admin.SnapshotTableRequest +): + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.snapshot_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.SnapshotTableRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_snapshot_table_from_dict(): + test_snapshot_table(request_type=dict) + + +@pytest.mark.asyncio +async def test_snapshot_table_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.SnapshotTableRequest, +): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.snapshot_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.SnapshotTableRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_snapshot_table_async_from_dict(): + await test_snapshot_table_async(request_type=dict) + + +def test_snapshot_table_field_headers(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.SnapshotTableRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
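+    # SnapshotTable is a long-running operation: the stub hands back a raw
+    # operations_pb2.Operation, which the client wraps in an api_core future
+    # (hence the future.Future isinstance checks in the tests above).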
+ with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.snapshot_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_snapshot_table_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.SnapshotTableRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.snapshot_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_snapshot_table_flattened(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.snapshot_table( + name="name_value", + cluster="cluster_value", + snapshot_id="snapshot_id_value", + description="description_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + assert args[0].cluster == "cluster_value" + + assert args[0].snapshot_id == "snapshot_id_value" + + assert args[0].description == "description_value" + + +def test_snapshot_table_flattened_error(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.snapshot_table( + bigtable_table_admin.SnapshotTableRequest(), + name="name_value", + cluster="cluster_value", + snapshot_id="snapshot_id_value", + description="description_value", + ) + + +@pytest.mark.asyncio +async def test_snapshot_table_flattened_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: + # Designate an appropriate return value for the call. 
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name="operations/spam")
+        )
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.snapshot_table(
+            name="name_value",
+            cluster="cluster_value",
+            snapshot_id="snapshot_id_value",
+            description="description_value",
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].name == "name_value"
+
+        assert args[0].cluster == "cluster_value"
+
+        assert args[0].snapshot_id == "snapshot_id_value"
+
+        assert args[0].description == "description_value"
+
+
+@pytest.mark.asyncio
+async def test_snapshot_table_flattened_error_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.snapshot_table(
+            bigtable_table_admin.SnapshotTableRequest(),
+            name="name_value",
+            cluster="cluster_value",
+            snapshot_id="snapshot_id_value",
+            description="description_value",
+        )
+
+
+def test_get_snapshot(
+    transport: str = "grpc", request_type=bigtable_table_admin.GetSnapshotRequest
+):
+    client = BigtableTableAdminClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = table.Snapshot(
+            name="name_value",
+            data_size_bytes=1594,
+            state=table.Snapshot.State.READY,
+            description="description_value",
+        )
+
+        response = client.get_snapshot(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == bigtable_table_admin.GetSnapshotRequest()
+
+    # Establish that the response is the type that we expect.
+
+    assert isinstance(response, table.Snapshot)
+
+    assert response.name == "name_value"
+
+    assert response.data_size_bytes == 1594
+
+    assert response.state == table.Snapshot.State.READY
+
+    assert response.description == "description_value"
+
+
+def test_get_snapshot_from_dict():
+    test_get_snapshot(request_type=dict)
+
+
+@pytest.mark.asyncio
+async def test_get_snapshot_async(
+    transport: str = "grpc_asyncio",
+    request_type=bigtable_table_admin.GetSnapshotRequest,
+):
+    client = BigtableTableAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call:
+        # Designate an appropriate return value for the call.
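+        # FakeUnaryUnaryCall wraps the response so the mocked stub can be awaited
+        # like a real async gRPC call.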
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + table.Snapshot( + name="name_value", + data_size_bytes=1594, + state=table.Snapshot.State.READY, + description="description_value", + ) + ) + + response = await client.get_snapshot(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.GetSnapshotRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, table.Snapshot) + + assert response.name == "name_value" + + assert response.data_size_bytes == 1594 + + assert response.state == table.Snapshot.State.READY + + assert response.description == "description_value" + + +@pytest.mark.asyncio +async def test_get_snapshot_async_from_dict(): + await test_get_snapshot_async(request_type=dict) + + +def test_get_snapshot_field_headers(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.GetSnapshotRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: + call.return_value = table.Snapshot() + + client.get_snapshot(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_snapshot_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.GetSnapshotRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Snapshot()) + + await client.get_snapshot(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_snapshot_flattened(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = table.Snapshot() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_snapshot(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
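+        # (The flattened keyword arguments are coalesced into a single request object.)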
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].name == "name_value"
+
+
+def test_get_snapshot_flattened_error():
+    client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_snapshot(
+            bigtable_table_admin.GetSnapshotRequest(), name="name_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_get_snapshot_flattened_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Snapshot())
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_snapshot(name="name_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].name == "name_value"
+
+
+@pytest.mark.asyncio
+async def test_get_snapshot_flattened_error_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.get_snapshot(
+            bigtable_table_admin.GetSnapshotRequest(), name="name_value",
+        )
+
+
+def test_list_snapshots(
+    transport: str = "grpc", request_type=bigtable_table_admin.ListSnapshotsRequest
+):
+    client = BigtableTableAdminClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = bigtable_table_admin.ListSnapshotsResponse(
+            next_page_token="next_page_token_value",
+        )
+
+        response = client.list_snapshots(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == bigtable_table_admin.ListSnapshotsRequest()
+
+    # Establish that the response is the type that we expect.
+
+    assert isinstance(response, pagers.ListSnapshotsPager)
+
+    assert response.next_page_token == "next_page_token_value"
+
+
+def test_list_snapshots_from_dict():
+    test_list_snapshots(request_type=dict)
+
+
+@pytest.mark.asyncio
+async def test_list_snapshots_async(
+    transport: str = "grpc_asyncio",
+    request_type=bigtable_table_admin.ListSnapshotsRequest,
+):
+    client = BigtableTableAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.ListSnapshotsResponse( + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_snapshots(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.ListSnapshotsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListSnapshotsAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_snapshots_async_from_dict(): + await test_list_snapshots_async(request_type=dict) + + +def test_list_snapshots_field_headers(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.ListSnapshotsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: + call.return_value = bigtable_table_admin.ListSnapshotsResponse() + + client.list_snapshots(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_snapshots_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.ListSnapshotsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.ListSnapshotsResponse() + ) + + await client.list_snapshots(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_snapshots_flattened(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_table_admin.ListSnapshotsResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_snapshots(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].parent == "parent_value"
+
+
+def test_list_snapshots_flattened_error():
+    client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_snapshots(
+            bigtable_table_admin.ListSnapshotsRequest(), parent="parent_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_snapshots_flattened_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            bigtable_table_admin.ListSnapshotsResponse()
+        )
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_snapshots(parent="parent_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].parent == "parent_value"
+
+
+@pytest.mark.asyncio
+async def test_list_snapshots_flattened_error_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_snapshots(
+            bigtable_table_admin.ListSnapshotsRequest(), parent="parent_value",
+        )
+
+
+def test_list_snapshots_pager():
+    client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            bigtable_table_admin.ListSnapshotsResponse(
+                snapshots=[table.Snapshot(), table.Snapshot(), table.Snapshot(),],
+                next_page_token="abc",
+            ),
+            bigtable_table_admin.ListSnapshotsResponse(
+                snapshots=[], next_page_token="def",
+            ),
+            bigtable_table_admin.ListSnapshotsResponse(
+                snapshots=[table.Snapshot(),], next_page_token="ghi",
+            ),
+            bigtable_table_admin.ListSnapshotsResponse(
+                snapshots=[table.Snapshot(), table.Snapshot(),],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
+        )
+        pager = client.list_snapshots(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, table.Snapshot) for i in results)
+
+
+def test_list_snapshots_pages():
+    client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call:
+        # Set the response to a series of pages.
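+        # The trailing RuntimeError is a guard: the mock raises if the pager
+        # requests more pages than were staged.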
+        call.side_effect = (
+            bigtable_table_admin.ListSnapshotsResponse(
+                snapshots=[table.Snapshot(), table.Snapshot(), table.Snapshot(),],
+                next_page_token="abc",
+            ),
+            bigtable_table_admin.ListSnapshotsResponse(
+                snapshots=[], next_page_token="def",
+            ),
+            bigtable_table_admin.ListSnapshotsResponse(
+                snapshots=[table.Snapshot(),], next_page_token="ghi",
+            ),
+            bigtable_table_admin.ListSnapshotsResponse(
+                snapshots=[table.Snapshot(), table.Snapshot(),],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_snapshots(request={}).pages)
+        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+            assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_snapshots_async_pager():
+    client = BigtableTableAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_snapshots), "__call__", new_callable=mock.AsyncMock
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            bigtable_table_admin.ListSnapshotsResponse(
+                snapshots=[table.Snapshot(), table.Snapshot(), table.Snapshot(),],
+                next_page_token="abc",
+            ),
+            bigtable_table_admin.ListSnapshotsResponse(
+                snapshots=[], next_page_token="def",
+            ),
+            bigtable_table_admin.ListSnapshotsResponse(
+                snapshots=[table.Snapshot(),], next_page_token="ghi",
+            ),
+            bigtable_table_admin.ListSnapshotsResponse(
+                snapshots=[table.Snapshot(), table.Snapshot(),],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_snapshots(request={},)
+        assert async_pager.next_page_token == "abc"
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, table.Snapshot) for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_snapshots_async_pages():
+    client = BigtableTableAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_snapshots), "__call__", new_callable=mock.AsyncMock
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            bigtable_table_admin.ListSnapshotsResponse(
+                snapshots=[table.Snapshot(), table.Snapshot(), table.Snapshot(),],
+                next_page_token="abc",
+            ),
+            bigtable_table_admin.ListSnapshotsResponse(
+                snapshots=[], next_page_token="def",
+            ),
+            bigtable_table_admin.ListSnapshotsResponse(
+                snapshots=[table.Snapshot(),], next_page_token="ghi",
+            ),
+            bigtable_table_admin.ListSnapshotsResponse(
+                snapshots=[table.Snapshot(), table.Snapshot(),],
+            ),
+            RuntimeError,
+        )
+        pages = []
+        async for page_ in (await client.list_snapshots(request={})).pages:
+            pages.append(page_)
+        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+            assert page_.raw_page.next_page_token == token
+
+
+def test_delete_snapshot(
+    transport: str = "grpc", request_type=bigtable_table_admin.DeleteSnapshotRequest
+):
+    client = BigtableTableAdminClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call:
+        # Designate an appropriate return value for the call.
+ call.return_value = None + + response = client.delete_snapshot(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.DeleteSnapshotRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_snapshot_from_dict(): + test_delete_snapshot(request_type=dict) + + +@pytest.mark.asyncio +async def test_delete_snapshot_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.DeleteSnapshotRequest, +): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + response = await client.delete_snapshot(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.DeleteSnapshotRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_snapshot_async_from_dict(): + await test_delete_snapshot_async(request_type=dict) + + +def test_delete_snapshot_field_headers(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.DeleteSnapshotRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: + call.return_value = None + + client.delete_snapshot(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_snapshot_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.DeleteSnapshotRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + await client.delete_snapshot(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
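+    # The routing header travels in the call's metadata keyword argument.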
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
+
+
+def test_delete_snapshot_flattened():
+    client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.delete_snapshot(name="name_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].name == "name_value"
+
+
+def test_delete_snapshot_flattened_error():
+    client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.delete_snapshot(
+            bigtable_table_admin.DeleteSnapshotRequest(), name="name_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_delete_snapshot_flattened_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.delete_snapshot(name="name_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].name == "name_value"
+
+
+@pytest.mark.asyncio
+async def test_delete_snapshot_flattened_error_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.delete_snapshot(
+            bigtable_table_admin.DeleteSnapshotRequest(), name="name_value",
+        )
+
+
+def test_create_backup(
+    transport: str = "grpc", request_type=bigtable_table_admin.CreateBackupRequest
+):
+    client = BigtableTableAdminClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.create_backup), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name="operations/spam")
+
+        response = client.create_backup(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == bigtable_table_admin.CreateBackupRequest()
+
+    # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future) + + +def test_create_backup_from_dict(): + test_create_backup(request_type=dict) + + +@pytest.mark.asyncio +async def test_create_backup_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.CreateBackupRequest, +): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.create_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.CreateBackupRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_backup_async_from_dict(): + await test_create_backup_async(request_type=dict) + + +def test_create_backup_field_headers(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.CreateBackupRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_backup), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.create_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_backup_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.CreateBackupRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_backup), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.create_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_backup_flattened(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(type(client.transport.create_backup), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name="operations/op")
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.create_backup(
+            parent="parent_value",
+            backup_id="backup_id_value",
+            backup=table.Backup(name="name_value"),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].parent == "parent_value"
+
+        assert args[0].backup_id == "backup_id_value"
+
+        assert args[0].backup == table.Backup(name="name_value")
+
+
+def test_create_backup_flattened_error():
+    client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.create_backup(
+            bigtable_table_admin.CreateBackupRequest(),
+            parent="parent_value",
+            backup_id="backup_id_value",
+            backup=table.Backup(name="name_value"),
+        )
+
+
+@pytest.mark.asyncio
+async def test_create_backup_flattened_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.create_backup), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name="operations/spam")
+        )
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.create_backup(
+            parent="parent_value",
+            backup_id="backup_id_value",
+            backup=table.Backup(name="name_value"),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].parent == "parent_value"
+
+        assert args[0].backup_id == "backup_id_value"
+
+        assert args[0].backup == table.Backup(name="name_value")
+
+
+@pytest.mark.asyncio
+async def test_create_backup_flattened_error_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.create_backup(
+            bigtable_table_admin.CreateBackupRequest(),
+            parent="parent_value",
+            backup_id="backup_id_value",
+            backup=table.Backup(name="name_value"),
+        )
+
+
+def test_get_backup(
+    transport: str = "grpc", request_type=bigtable_table_admin.GetBackupRequest
+):
+    client = BigtableTableAdminClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_backup), "__call__") as call:
+        # Designate an appropriate return value for the call.
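+        # Arbitrary non-default field values, so the response assertions below
+        # are meaningful.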
+ call.return_value = table.Backup( + name="name_value", + source_table="source_table_value", + size_bytes=1089, + state=table.Backup.State.CREATING, + ) + + response = client.get_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.GetBackupRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, table.Backup) + + assert response.name == "name_value" + + assert response.source_table == "source_table_value" + + assert response.size_bytes == 1089 + + assert response.state == table.Backup.State.CREATING + + +def test_get_backup_from_dict(): + test_get_backup(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_backup_async( + transport: str = "grpc_asyncio", request_type=bigtable_table_admin.GetBackupRequest +): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + table.Backup( + name="name_value", + source_table="source_table_value", + size_bytes=1089, + state=table.Backup.State.CREATING, + ) + ) + + response = await client.get_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.GetBackupRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, table.Backup) + + assert response.name == "name_value" + + assert response.source_table == "source_table_value" + + assert response.size_bytes == 1089 + + assert response.state == table.Backup.State.CREATING + + +@pytest.mark.asyncio +async def test_get_backup_async_from_dict(): + await test_get_backup_async(request_type=dict) + + +def test_get_backup_field_headers(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.GetBackupRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_backup), "__call__") as call: + call.return_value = table.Backup() + + client.get_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_backup_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+    request = bigtable_table_admin.GetBackupRequest()
+    request.name = "name/value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_backup), "__call__") as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Backup())
+
+        await client.get_backup(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
+
+
+def test_get_backup_flattened():
+    client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_backup), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = table.Backup()
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.get_backup(name="name_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].name == "name_value"
+
+
+def test_get_backup_flattened_error():
+    client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_backup(
+            bigtable_table_admin.GetBackupRequest(), name="name_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_get_backup_flattened_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_backup), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Backup())
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_backup(name="name_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].name == "name_value"
+
+
+@pytest.mark.asyncio
+async def test_get_backup_flattened_error_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.get_backup(
+            bigtable_table_admin.GetBackupRequest(), name="name_value",
+        )
+
+
+def test_update_backup(
+    transport: str = "grpc", request_type=bigtable_table_admin.UpdateBackupRequest
+):
+    client = BigtableTableAdminClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.update_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = table.Backup( + name="name_value", + source_table="source_table_value", + size_bytes=1089, + state=table.Backup.State.CREATING, + ) + + response = client.update_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.UpdateBackupRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, table.Backup) + + assert response.name == "name_value" + + assert response.source_table == "source_table_value" + + assert response.size_bytes == 1089 + + assert response.state == table.Backup.State.CREATING + + +def test_update_backup_from_dict(): + test_update_backup(request_type=dict) + + +@pytest.mark.asyncio +async def test_update_backup_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.UpdateBackupRequest, +): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + table.Backup( + name="name_value", + source_table="source_table_value", + size_bytes=1089, + state=table.Backup.State.CREATING, + ) + ) + + response = await client.update_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.UpdateBackupRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, table.Backup) + + assert response.name == "name_value" + + assert response.source_table == "source_table_value" + + assert response.size_bytes == 1089 + + assert response.state == table.Backup.State.CREATING + + +@pytest.mark.asyncio +async def test_update_backup_async_from_dict(): + await test_update_backup_async(request_type=dict) + + +def test_update_backup_field_headers(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.UpdateBackupRequest() + request.backup.name = "backup.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_backup), "__call__") as call: + call.return_value = table.Backup() + + client.update_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
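+    # Nested request fields show up dotted in the routing header (here backup.name).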
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "backup.name=backup.name/value",) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_update_backup_field_headers_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable_table_admin.UpdateBackupRequest()
+    request.backup.name = "backup.name/value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.update_backup), "__call__") as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Backup())
+
+        await client.update_backup(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "backup.name=backup.name/value",) in kw["metadata"]
+
+
+def test_update_backup_flattened():
+    client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.update_backup), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = table.Backup()
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.update_backup(
+            backup=table.Backup(name="name_value"),
+            update_mask=field_mask.FieldMask(paths=["paths_value"]),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].backup == table.Backup(name="name_value")
+
+        assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"])
+
+
+def test_update_backup_flattened_error():
+    client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.update_backup(
+            bigtable_table_admin.UpdateBackupRequest(),
+            backup=table.Backup(name="name_value"),
+            update_mask=field_mask.FieldMask(paths=["paths_value"]),
+        )
+
+
+@pytest.mark.asyncio
+async def test_update_backup_flattened_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.update_backup), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Backup())
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.update_backup(
+            backup=table.Backup(name="name_value"),
+            update_mask=field_mask.FieldMask(paths=["paths_value"]),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].backup == table.Backup(name="name_value") + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + +@pytest.mark.asyncio +async def test_update_backup_flattened_error_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_backup( + bigtable_table_admin.UpdateBackupRequest(), + backup=table.Backup(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + +def test_delete_backup( + transport: str = "grpc", request_type=bigtable_table_admin.DeleteBackupRequest +): + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.DeleteBackupRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_backup_from_dict(): + test_delete_backup(request_type=dict) + + +@pytest.mark.asyncio +async def test_delete_backup_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.DeleteBackupRequest, +): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + response = await client.delete_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.DeleteBackupRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_backup_async_from_dict(): + await test_delete_backup_async(request_type=dict) + + +def test_delete_backup_field_headers(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.DeleteBackupRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: + call.return_value = None + + client.delete_backup(request) + + # Establish that the underlying gRPC stub method was called. 
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_delete_backup_field_headers_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable_table_admin.DeleteBackupRequest()
+    request.name = "name/value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_backup), "__call__") as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+
+        await client.delete_backup(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
+
+
+def test_delete_backup_flattened():
+    client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_backup), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.delete_backup(name="name_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].name == "name_value"
+
+
+def test_delete_backup_flattened_error():
+    client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.delete_backup(
+            bigtable_table_admin.DeleteBackupRequest(), name="name_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_delete_backup_flattened_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_backup), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.delete_backup(name="name_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].name == "name_value"
+
+
+@pytest.mark.asyncio
+async def test_delete_backup_flattened_error_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
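+    # The ValueError is raised client-side, before any RPC is attempted.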
+ with pytest.raises(ValueError): + await client.delete_backup( + bigtable_table_admin.DeleteBackupRequest(), name="name_value", + ) + + +def test_list_backups( + transport: str = "grpc", request_type=bigtable_table_admin.ListBackupsRequest +): + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_table_admin.ListBackupsResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_backups(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.ListBackupsRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, pagers.ListBackupsPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_backups_from_dict(): + test_list_backups(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_backups_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.ListBackupsRequest, +): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.ListBackupsResponse( + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_backups(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.ListBackupsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListBackupsAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_backups_async_from_dict(): + await test_list_backups_async(request_type=dict) + + +def test_list_backups_field_headers(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.ListBackupsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + call.return_value = bigtable_table_admin.ListBackupsResponse() + + client.list_backups(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_backups_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.ListBackupsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.ListBackupsResponse() + ) + + await client.list_backups(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_backups_flattened(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_table_admin.ListBackupsResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_backups(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +def test_list_backups_flattened_error(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_backups( + bigtable_table_admin.ListBackupsRequest(), parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_backups_flattened_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_table_admin.ListBackupsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.ListBackupsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_backups(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +@pytest.mark.asyncio +async def test_list_backups_flattened_error_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        await client.list_backups(
+            bigtable_table_admin.ListBackupsRequest(), parent="parent_value",
+        )
+
+
+def test_list_backups_pager():
+    client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_backups), "__call__") as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            bigtable_table_admin.ListBackupsResponse(
+                backups=[table.Backup(), table.Backup(), table.Backup(),],
+                next_page_token="abc",
+            ),
+            bigtable_table_admin.ListBackupsResponse(
+                backups=[], next_page_token="def",
+            ),
+            bigtable_table_admin.ListBackupsResponse(
+                backups=[table.Backup(),], next_page_token="ghi",
+            ),
+            bigtable_table_admin.ListBackupsResponse(
+                backups=[table.Backup(), table.Backup(),],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
+        )
+        pager = client.list_backups(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, table.Backup) for i in results)
+
+
+def test_list_backups_pages():
+    client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_backups), "__call__") as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            bigtable_table_admin.ListBackupsResponse(
+                backups=[table.Backup(), table.Backup(), table.Backup(),],
+                next_page_token="abc",
+            ),
+            bigtable_table_admin.ListBackupsResponse(
+                backups=[], next_page_token="def",
+            ),
+            bigtable_table_admin.ListBackupsResponse(
+                backups=[table.Backup(),], next_page_token="ghi",
+            ),
+            bigtable_table_admin.ListBackupsResponse(
+                backups=[table.Backup(), table.Backup(),],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_backups(request={}).pages)
+        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+            assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_backups_async_pager():
+    client = BigtableTableAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_backups), "__call__", new_callable=mock.AsyncMock
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            bigtable_table_admin.ListBackupsResponse(
+                backups=[table.Backup(), table.Backup(), table.Backup(),],
+                next_page_token="abc",
+            ),
+            bigtable_table_admin.ListBackupsResponse(
+                backups=[], next_page_token="def",
+            ),
+            bigtable_table_admin.ListBackupsResponse(
+                backups=[table.Backup(),], next_page_token="ghi",
+            ),
+            bigtable_table_admin.ListBackupsResponse(
+                backups=[table.Backup(), table.Backup(),],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_backups(request={},)
+        assert async_pager.next_page_token == "abc"
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, table.Backup) for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_backups_async_pages():
+    client = BigtableTableAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
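+    # (As in the sync pager tests above: the stub is fed a series of fake
+    # responses, and iterating `.pages` should walk the next_page_token chain
+    # "abc" -> "def" -> "ghi" -> "" in order.)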
+ with mock.patch.object( + type(client.transport.list_backups), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + bigtable_table_admin.ListBackupsResponse( + backups=[table.Backup(), table.Backup(), table.Backup(),], + next_page_token="abc", + ), + bigtable_table_admin.ListBackupsResponse( + backups=[], next_page_token="def", + ), + bigtable_table_admin.ListBackupsResponse( + backups=[table.Backup(),], next_page_token="ghi", + ), + bigtable_table_admin.ListBackupsResponse( + backups=[table.Backup(), table.Backup(),], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_backups(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_restore_table( + transport: str = "grpc", request_type=bigtable_table_admin.RestoreTableRequest +): + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.restore_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.restore_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.RestoreTableRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_restore_table_from_dict(): + test_restore_table(request_type=dict) + + +@pytest.mark.asyncio +async def test_restore_table_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.RestoreTableRequest, +): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.restore_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.restore_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.RestoreTableRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_restore_table_async_from_dict(): + await test_restore_table_async(request_type=dict) + + +def test_restore_table_field_headers(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = bigtable_table_admin.RestoreTableRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.restore_table), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.restore_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_restore_table_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.RestoreTableRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.restore_table), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.restore_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_get_iam_policy( + transport: str = "grpc", request_type=iam_policy.GetIamPolicyRequest +): + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy.Policy(version=774, etag=b"etag_blob",) + + response = client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.GetIamPolicyRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, policy.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_get_iam_policy_from_dict(): + test_get_iam_policy(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_iam_policy_async( + transport: str = "grpc_asyncio", request_type=iam_policy.GetIamPolicyRequest +): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
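+        # (FakeUnaryUnaryCall wraps the fake response so the mocked stub can be
+        # awaited like a real unary-unary RPC.)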
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy.Policy(version=774, etag=b"etag_blob",) + ) + + response = await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.GetIamPolicyRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, policy.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_get_iam_policy_async_from_dict(): + await test_get_iam_policy_async(request_type=dict) + + +def test_get_iam_policy_field_headers(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy.Policy() + + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_iam_policy_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy.Policy()) + + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_get_iam_policy_from_dict_foreign(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy.Policy() + + response = client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + + +def test_get_iam_policy_flattened(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
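+        # (An empty policy.Policy() is enough here; the test inspects only the
+        # request the stub receives, not the returned policy.)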
+ call.return_value = policy.Policy() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_iam_policy(resource="resource_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].resource == "resource_value" + + +def test_get_iam_policy_flattened_error(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_iam_policy( + iam_policy.GetIamPolicyRequest(), resource="resource_value", + ) + + +@pytest.mark.asyncio +async def test_get_iam_policy_flattened_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy.Policy() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy.Policy()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_iam_policy(resource="resource_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].resource == "resource_value" + + +@pytest.mark.asyncio +async def test_get_iam_policy_flattened_error_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_iam_policy( + iam_policy.GetIamPolicyRequest(), resource="resource_value", + ) + + +def test_set_iam_policy( + transport: str = "grpc", request_type=iam_policy.SetIamPolicyRequest +): + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy.Policy(version=774, etag=b"etag_blob",) + + response = client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.SetIamPolicyRequest() + + # Establish that the response is the type that we expect. 
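+    # (The version/etag set on the fake pass through the mocked call
+    # unchanged, so the assertions below pin those exact values.)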
+ + assert isinstance(response, policy.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_set_iam_policy_from_dict(): + test_set_iam_policy(request_type=dict) + + +@pytest.mark.asyncio +async def test_set_iam_policy_async( + transport: str = "grpc_asyncio", request_type=iam_policy.SetIamPolicyRequest +): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy.Policy(version=774, etag=b"etag_blob",) + ) + + response = await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.SetIamPolicyRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, policy.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_set_iam_policy_async_from_dict(): + await test_set_iam_policy_async(request_type=dict) + + +def test_set_iam_policy_field_headers(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy.Policy() + + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_set_iam_policy_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy.Policy()) + + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
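+    # (Unlike the table methods, the IAM methods route on `resource` rather
+    # than `name` or `parent`, hence the "resource=resource/value" pair.)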
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_set_iam_policy_from_dict_foreign(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy.Policy() + + response = client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy.Policy(version=774), + } + ) + call.assert_called() + + +def test_set_iam_policy_flattened(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy.Policy() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.set_iam_policy(resource="resource_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].resource == "resource_value" + + +def test_set_iam_policy_flattened_error(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_iam_policy( + iam_policy.SetIamPolicyRequest(), resource="resource_value", + ) + + +@pytest.mark.asyncio +async def test_set_iam_policy_flattened_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy.Policy() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy.Policy()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.set_iam_policy(resource="resource_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].resource == "resource_value" + + +@pytest.mark.asyncio +async def test_set_iam_policy_flattened_error_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.set_iam_policy( + iam_policy.SetIamPolicyRequest(), resource="resource_value", + ) + + +def test_test_iam_permissions( + transport: str = "grpc", request_type=iam_policy.TestIamPermissionsRequest +): + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
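+    # (The doubled "test_" in this function's name is just the pytest prefix
+    # applied to the test_iam_permissions RPC.)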
+ with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + + response = client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.TestIamPermissionsRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, iam_policy.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +def test_test_iam_permissions_from_dict(): + test_test_iam_permissions(request_type=dict) + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async( + transport: str = "grpc_asyncio", request_type=iam_policy.TestIamPermissionsRequest +): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy.TestIamPermissionsResponse(permissions=["permissions_value"],) + ) + + response = await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.TestIamPermissionsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async_from_dict(): + await test_test_iam_permissions_async(request_type=dict) + + +def test_test_iam_permissions_field_headers(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy.TestIamPermissionsResponse() + + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = iam_policy.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy.TestIamPermissionsResponse() + ) + + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_test_iam_permissions_from_dict_foreign(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy.TestIamPermissionsResponse() + + response = client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + + +def test_test_iam_permissions_flattened(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy.TestIamPermissionsResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.test_iam_permissions( + resource="resource_value", permissions=["permissions_value"], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].resource == "resource_value" + + assert args[0].permissions == ["permissions_value"] + + +def test_test_iam_permissions_flattened_error(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.test_iam_permissions( + iam_policy.TestIamPermissionsRequest(), + resource="resource_value", + permissions=["permissions_value"], + ) + + +@pytest.mark.asyncio +async def test_test_iam_permissions_flattened_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy.TestIamPermissionsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy.TestIamPermissionsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.test_iam_permissions( + resource="resource_value", permissions=["permissions_value"], + ) + + # Establish that the underlying call was made with the expected + # request object values. 
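+    # (The async variants assert only that at least one call was recorded,
+    # rather than an exact count, before inspecting args[0].)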
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].resource == "resource_value" + + assert args[0].permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_flattened_error_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.test_iam_permissions( + iam_policy.TestIamPermissionsRequest(), + resource="resource_value", + permissions=["permissions_value"], + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.BigtableTableAdminGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.BigtableTableAdminGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableTableAdminClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.BigtableTableAdminGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableTableAdminClient( + client_options={"scopes": ["1", "2"]}, transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.BigtableTableAdminGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + client = BigtableTableAdminClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.BigtableTableAdminGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.BigtableTableAdminGrpcAsyncIOTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.BigtableTableAdminGrpcTransport, + transports.BigtableTableAdminGrpcAsyncIOTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + assert isinstance(client.transport, transports.BigtableTableAdminGrpcTransport,) + + +def test_bigtable_table_admin_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(exceptions.DuplicateCredentialArgs): + transport = transports.BigtableTableAdminTransport( + credentials=credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_bigtable_table_admin_base_transport(): + # Instantiate the base transport. 
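+    # (Its __init__ is stubbed out below so the abstract transport can be
+    # built without credentials; every RPC method on it should then raise
+    # NotImplementedError.)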
+ with mock.patch( + "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.BigtableTableAdminTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.BigtableTableAdminTransport( + credentials=credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + "create_table", + "create_table_from_snapshot", + "list_tables", + "get_table", + "delete_table", + "modify_column_families", + "drop_row_range", + "generate_consistency_token", + "check_consistency", + "snapshot_table", + "get_snapshot", + "list_snapshots", + "delete_snapshot", + "create_backup", + "get_backup", + "update_backup", + "delete_backup", + "list_backups", + "restore_table", + "get_iam_policy", + "set_iam_policy", + "test_iam_permissions", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + +def test_bigtable_table_admin_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + auth, "load_credentials_from_file" + ) as load_creds, mock.patch( + "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.BigtableTableAdminTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.BigtableTableAdminTransport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=( + "https://www.googleapis.com/auth/bigtable.admin", + "https://www.googleapis.com/auth/bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-bigtable.admin", + "https://www.googleapis.com/auth/cloud-bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + quota_project_id="octopus", + ) + + +def test_bigtable_table_admin_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(auth, "default") as adc, mock.patch( + "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.BigtableTableAdminTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.BigtableTableAdminTransport() + adc.assert_called_once() + + +def test_bigtable_table_admin_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
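+    # (google.auth.default is patched to return anonymous credentials, and the
+    # client is expected to request the six Bigtable admin scopes with it.)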
+ with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + BigtableTableAdminClient() + adc.assert_called_once_with( + scopes=( + "https://www.googleapis.com/auth/bigtable.admin", + "https://www.googleapis.com/auth/bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-bigtable.admin", + "https://www.googleapis.com/auth/cloud-bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + quota_project_id=None, + ) + + +def test_bigtable_table_admin_transport_auth_adc(): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transports.BigtableTableAdminGrpcTransport( + host="squid.clam.whelk", quota_project_id="octopus" + ) + adc.assert_called_once_with( + scopes=( + "https://www.googleapis.com/auth/bigtable.admin", + "https://www.googleapis.com/auth/bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-bigtable.admin", + "https://www.googleapis.com/auth/cloud-bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.BigtableTableAdminGrpcTransport, + transports.BigtableTableAdminGrpcAsyncIOTransport, + ], +) +def test_bigtable_table_admin_grpc_transport_client_cert_source_for_mtls( + transport_class, +): + cred = credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=( + "https://www.googleapis.com/auth/bigtable.admin", + "https://www.googleapis.com/auth/bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-bigtable.admin", + "https://www.googleapis.com/auth/cloud-bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. 
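+    # (grpc.ssl_channel_credentials should then be fed the cert/key pair
+    # produced by the client_cert_source callback.)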
+    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
+        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
+            transport_class(
+                credentials=cred,
+                client_cert_source_for_mtls=client_cert_source_callback,
+            )
+            expected_cert, expected_key = client_cert_source_callback()
+            mock_ssl_cred.assert_called_once_with(
+                certificate_chain=expected_cert, private_key=expected_key
+            )
+
+
+def test_bigtable_table_admin_host_no_port():
+    client = BigtableTableAdminClient(
+        credentials=credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(
+            api_endpoint="bigtableadmin.googleapis.com"
+        ),
+    )
+    assert client.transport._host == "bigtableadmin.googleapis.com:443"
+
+
+def test_bigtable_table_admin_host_with_port():
+    client = BigtableTableAdminClient(
+        credentials=credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(
+            api_endpoint="bigtableadmin.googleapis.com:8000"
+        ),
+    )
+    assert client.transport._host == "bigtableadmin.googleapis.com:8000"
+
+
+def test_bigtable_table_admin_grpc_transport_channel():
+    channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.BigtableTableAdminGrpcTransport(
+        host="squid.clam.whelk", channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+def test_bigtable_table_admin_grpc_asyncio_transport_channel():
+    channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.BigtableTableAdminGrpcAsyncIOTransport(
+        host="squid.clam.whelk", channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
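+# (Until then, passing api_mtls_endpoint together with client_cert_source is
+# expected to emit a DeprecationWarning while still building an mTLS channel.)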
+@pytest.mark.parametrize( + "transport_class", + [ + transports.BigtableTableAdminGrpcTransport, + transports.BigtableTableAdminGrpcAsyncIOTransport, + ], +) +def test_bigtable_table_admin_transport_channel_mtls_with_client_cert_source( + transport_class, +): + with mock.patch( + "grpc.ssl_channel_credentials", autospec=True + ) as grpc_ssl_channel_cred: + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(auth, "default") as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=( + "https://www.googleapis.com/auth/bigtable.admin", + "https://www.googleapis.com/auth/bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-bigtable.admin", + "https://www.googleapis.com/auth/cloud-bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. 
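+# (This variant covers the ADC path: with no client cert source supplied, the
+# channel should fall back to google.auth.transport.grpc.SslCredentials.)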
+@pytest.mark.parametrize(
+    "transport_class",
+    [
+        transports.BigtableTableAdminGrpcTransport,
+        transports.BigtableTableAdminGrpcAsyncIOTransport,
+    ],
+)
+def test_bigtable_table_admin_transport_channel_mtls_with_adc(transport_class):
+    mock_ssl_cred = mock.Mock()
+    with mock.patch.multiple(
+        "google.auth.transport.grpc.SslCredentials",
+        __init__=mock.Mock(return_value=None),
+        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
+    ):
+        with mock.patch.object(
+            transport_class, "create_channel"
+        ) as grpc_create_channel:
+            mock_grpc_channel = mock.Mock()
+            grpc_create_channel.return_value = mock_grpc_channel
+            mock_cred = mock.Mock()
+
+            with pytest.warns(DeprecationWarning):
+                transport = transport_class(
+                    host="squid.clam.whelk",
+                    credentials=mock_cred,
+                    api_mtls_endpoint="mtls.squid.clam.whelk",
+                    client_cert_source=None,
+                )
+
+            grpc_create_channel.assert_called_once_with(
+                "mtls.squid.clam.whelk:443",
+                credentials=mock_cred,
+                credentials_file=None,
+                scopes=(
+                    "https://www.googleapis.com/auth/bigtable.admin",
+                    "https://www.googleapis.com/auth/bigtable.admin.table",
+                    "https://www.googleapis.com/auth/cloud-bigtable.admin",
+                    "https://www.googleapis.com/auth/cloud-bigtable.admin.table",
+                    "https://www.googleapis.com/auth/cloud-platform",
+                    "https://www.googleapis.com/auth/cloud-platform.read-only",
+                ),
+                ssl_credentials=mock_ssl_cred,
+                quota_project_id=None,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+            assert transport.grpc_channel == mock_grpc_channel
+
+
+def test_bigtable_table_admin_grpc_lro_client():
+    client = BigtableTableAdminClient(
+        credentials=credentials.AnonymousCredentials(), transport="grpc",
+    )
+    transport = client.transport
+
+    # Ensure that we have an api-core operations client.
+    assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
+
+    # Ensure that subsequent calls to the property send the exact same object.
+    assert transport.operations_client is transport.operations_client
+
+
+def test_bigtable_table_admin_grpc_lro_async_client():
+    client = BigtableTableAdminAsyncClient(
+        credentials=credentials.AnonymousCredentials(), transport="grpc_asyncio",
+    )
+    transport = client.transport
+
+    # Ensure that we have an api-core operations client.
+    assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
+
+    # Ensure that subsequent calls to the property send the exact same object.
+    assert transport.operations_client is transport.operations_client
+
+
+def test_backup_path():
+    project = "squid"
+    instance = "clam"
+    cluster = "whelk"
+    backup = "octopus"
+
+    expected = "projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}".format(
+        project=project, instance=instance, cluster=cluster, backup=backup,
+    )
+    actual = BigtableTableAdminClient.backup_path(project, instance, cluster, backup)
+    assert expected == actual
+
+
+def test_parse_backup_path():
+    expected = {
+        "project": "oyster",
+        "instance": "nudibranch",
+        "cluster": "cuttlefish",
+        "backup": "mussel",
+    }
+    path = BigtableTableAdminClient.backup_path(**expected)
+
+    # Check that the path construction is reversible.
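+    # (e.g. "projects/oyster/instances/nudibranch/clusters/cuttlefish/backups/mussel"
+    # should parse back into the dict above.)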
+ actual = BigtableTableAdminClient.parse_backup_path(path) + assert expected == actual + + +def test_cluster_path(): + project = "winkle" + instance = "nautilus" + cluster = "scallop" + + expected = "projects/{project}/instances/{instance}/clusters/{cluster}".format( + project=project, instance=instance, cluster=cluster, + ) + actual = BigtableTableAdminClient.cluster_path(project, instance, cluster) + assert expected == actual + + +def test_parse_cluster_path(): + expected = { + "project": "abalone", + "instance": "squid", + "cluster": "clam", + } + path = BigtableTableAdminClient.cluster_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableTableAdminClient.parse_cluster_path(path) + assert expected == actual + + +def test_instance_path(): + project = "whelk" + instance = "octopus" + + expected = "projects/{project}/instances/{instance}".format( + project=project, instance=instance, + ) + actual = BigtableTableAdminClient.instance_path(project, instance) + assert expected == actual + + +def test_parse_instance_path(): + expected = { + "project": "oyster", + "instance": "nudibranch", + } + path = BigtableTableAdminClient.instance_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableTableAdminClient.parse_instance_path(path) + assert expected == actual + + +def test_snapshot_path(): + project = "cuttlefish" + instance = "mussel" + cluster = "winkle" + snapshot = "nautilus" + + expected = "projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}".format( + project=project, instance=instance, cluster=cluster, snapshot=snapshot, + ) + actual = BigtableTableAdminClient.snapshot_path( + project, instance, cluster, snapshot + ) + assert expected == actual + + +def test_parse_snapshot_path(): + expected = { + "project": "scallop", + "instance": "abalone", + "cluster": "squid", + "snapshot": "clam", + } + path = BigtableTableAdminClient.snapshot_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableTableAdminClient.parse_snapshot_path(path) + assert expected == actual + + +def test_table_path(): + project = "whelk" + instance = "octopus" + table = "oyster" + + expected = "projects/{project}/instances/{instance}/tables/{table}".format( + project=project, instance=instance, table=table, + ) + actual = BigtableTableAdminClient.table_path(project, instance, table) + assert expected == actual + + +def test_parse_table_path(): + expected = { + "project": "nudibranch", + "instance": "cuttlefish", + "table": "mussel", + } + path = BigtableTableAdminClient.table_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableTableAdminClient.parse_table_path(path) + assert expected == actual + + +def test_common_billing_account_path(): + billing_account = "winkle" + + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = BigtableTableAdminClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "nautilus", + } + path = BigtableTableAdminClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = BigtableTableAdminClient.parse_common_billing_account_path(path) + assert expected == actual + + +def test_common_folder_path(): + folder = "scallop" + + expected = "folders/{folder}".format(folder=folder,) + actual = BigtableTableAdminClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "abalone", + } + path = BigtableTableAdminClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableTableAdminClient.parse_common_folder_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "squid" + + expected = "organizations/{organization}".format(organization=organization,) + actual = BigtableTableAdminClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "clam", + } + path = BigtableTableAdminClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableTableAdminClient.parse_common_organization_path(path) + assert expected == actual + + +def test_common_project_path(): + project = "whelk" + + expected = "projects/{project}".format(project=project,) + actual = BigtableTableAdminClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "octopus", + } + path = BigtableTableAdminClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableTableAdminClient.parse_common_project_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "oyster" + location = "nudibranch" + + expected = "projects/{project}/locations/{location}".format( + project=project, location=location, + ) + actual = BigtableTableAdminClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "cuttlefish", + "location": "mussel", + } + path = BigtableTableAdminClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = BigtableTableAdminClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.BigtableTableAdminTransport, "_prep_wrapped_messages" + ) as prep: + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.BigtableTableAdminTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = BigtableTableAdminClient.get_transport_class() + transport = transport_class( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/tests/unit/gapic/bigtable_v2/__init__.py b/tests/unit/gapic/bigtable_v2/__init__.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/tests/unit/gapic/bigtable_v2/__init__.py @@ -0,0 +1 @@ + diff --git a/tests/unit/gapic/bigtable_v2/test_bigtable.py b/tests/unit/gapic/bigtable_v2/test_bigtable.py new file mode 100644 index 000000000..0a42c2dad --- /dev/null +++ b/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -0,0 +1,2372 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from google import auth +from google.api_core import client_options +from google.api_core import exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.auth import credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.bigtable_v2.services.bigtable import BigtableAsyncClient +from google.cloud.bigtable_v2.services.bigtable import BigtableClient +from google.cloud.bigtable_v2.services.bigtable import transports +from google.cloud.bigtable_v2.types import bigtable +from google.cloud.bigtable_v2.types import data +from google.oauth2 import service_account + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
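+# (e.g. when the default endpoint contains "localhost", DEFAULT_ENDPOINT is
+# patched to "foo.googleapis.com" so the derived mTLS endpoint,
+# "foo.mtls.googleapis.com", differs from it.)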
+def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert BigtableClient._get_default_mtls_endpoint(None) is None + assert BigtableClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert ( + BigtableClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + BigtableClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + BigtableClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert BigtableClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +def test_bigtable_client_from_service_account_info(): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = BigtableClient.from_service_account_info(info) + assert client.transport._credentials == creds + + assert client.transport._host == "bigtable.googleapis.com:443" + + +@pytest.mark.parametrize("client_class", [BigtableClient, BigtableAsyncClient,]) +def test_bigtable_client_from_service_account_file(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + + assert client.transport._host == "bigtable.googleapis.com:443" + + +def test_bigtable_client_get_transport_class(): + transport = BigtableClient.get_transport_class() + available_transports = [ + transports.BigtableGrpcTransport, + ] + assert transport in available_transports + + transport = BigtableClient.get_transport_class("grpc") + assert transport == transports.BigtableGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (BigtableClient, transports.BigtableGrpcTransport, "grpc"), + (BigtableAsyncClient, transports.BigtableGrpcAsyncIOTransport, "grpc_asyncio"), + ], +) +@mock.patch.object( + BigtableClient, "DEFAULT_ENDPOINT", modify_default_endpoint(BigtableClient) +) +@mock.patch.object( + BigtableAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(BigtableAsyncClient), +) +def test_bigtable_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(BigtableClient, "get_transport_class") as gtc: + transport = transport_class(credentials=credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(BigtableClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. 
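+    # Illustrative note: an explicit api_endpoint in ClientOptions bypasses
+    # the DEFAULT_ENDPOINT / DEFAULT_MTLS_ENDPOINT selection and is forwarded
+    # verbatim as ``host`` to the transport, as asserted below. Sketch with a
+    # hypothetical endpoint:
+    #
+    #   options = client_options.ClientOptions(api_endpoint="my-proxy.example.com")
+    #   client = BigtableClient(
+    #       credentials=credentials.AnonymousCredentials(), client_options=options,
+    #   )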
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + (BigtableClient, transports.BigtableGrpcTransport, "grpc", "true"), + ( + BigtableAsyncClient, + transports.BigtableGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + (BigtableClient, transports.BigtableGrpcTransport, "grpc", "false"), + ( + BigtableAsyncClient, + transports.BigtableGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ], +) +@mock.patch.object( + BigtableClient, "DEFAULT_ENDPOINT", modify_default_endpoint(BigtableClient) +) +@mock.patch.object( + BigtableAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(BigtableAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_bigtable_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. 
Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (BigtableClient, transports.BigtableGrpcTransport, "grpc"), + (BigtableAsyncClient, transports.BigtableGrpcAsyncIOTransport, "grpc_asyncio"), + ], +) +def test_bigtable_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. 
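+    # Illustrative note: scopes supplied via ClientOptions are handed to the
+    # transport constructor unchanged; only when none are supplied does the
+    # transport fall back to its default Bigtable/cloud-platform scopes, as
+    # exercised by the ADC tests near the end of this module.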
+ options = client_options.ClientOptions(scopes=["1", "2"],) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (BigtableClient, transports.BigtableGrpcTransport, "grpc"), + (BigtableAsyncClient, transports.BigtableGrpcAsyncIOTransport, "grpc_asyncio"), + ], +) +def test_bigtable_client_client_options_credentials_file( + client_class, transport_class, transport_name +): + # Check the case credentials file is provided. + options = client_options.ClientOptions(credentials_file="credentials.json") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_bigtable_client_client_options_from_dict(): + with mock.patch( + "google.cloud.bigtable_v2.services.bigtable.transports.BigtableGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = BigtableClient(client_options={"api_endpoint": "squid.clam.whelk"}) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_read_rows(transport: str = "grpc", request_type=bigtable.ReadRowsRequest): + client = BigtableClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.read_rows), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = iter([bigtable.ReadRowsResponse()]) + + response = client.read_rows(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable.ReadRowsRequest() + + # Establish that the response is the type that we expect. + for message in response: + assert isinstance(message, bigtable.ReadRowsResponse) + + +def test_read_rows_from_dict(): + test_read_rows(request_type=dict) + + +@pytest.mark.asyncio +async def test_read_rows_async( + transport: str = "grpc_asyncio", request_type=bigtable.ReadRowsRequest +): + client = BigtableAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
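+    # Illustrative note: ``client.transport.read_rows`` resolves to the gRPC
+    # stub method for this RPC, so patching ``__call__`` on its type
+    # intercepts the invocation itself; the client-side plumbing still runs,
+    # but nothing ever reaches the network.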
+ with mock.patch.object(type(client.transport.read_rows), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.ReadRowsResponse()] + ) + + response = await client.read_rows(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable.ReadRowsRequest() + + # Establish that the response is the type that we expect. + message = await response.read() + assert isinstance(message, bigtable.ReadRowsResponse) + + +@pytest.mark.asyncio +async def test_read_rows_async_from_dict(): + await test_read_rows_async(request_type=dict) + + +def test_read_rows_field_headers(): + client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable.ReadRowsRequest() + request.table_name = "table_name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.read_rows), "__call__") as call: + call.return_value = iter([bigtable.ReadRowsResponse()]) + + client.read_rows(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "table_name=table_name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_read_rows_field_headers_async(): + client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable.ReadRowsRequest() + request.table_name = "table_name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.read_rows), "__call__") as call: + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.ReadRowsResponse()] + ) + + await client.read_rows(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "table_name=table_name/value",) in kw["metadata"] + + +def test_read_rows_flattened(): + client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.read_rows), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = iter([bigtable.ReadRowsResponse()]) + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.read_rows( + table_name="table_name_value", app_profile_id="app_profile_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].table_name == "table_name_value" + + assert args[0].app_profile_id == "app_profile_id_value" + + +def test_read_rows_flattened_error(): + client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.read_rows( + bigtable.ReadRowsRequest(), + table_name="table_name_value", + app_profile_id="app_profile_id_value", + ) + + +@pytest.mark.asyncio +async def test_read_rows_flattened_async(): + client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.read_rows), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = iter([bigtable.ReadRowsResponse()]) + + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.read_rows( + table_name="table_name_value", app_profile_id="app_profile_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].table_name == "table_name_value" + + assert args[0].app_profile_id == "app_profile_id_value" + + +@pytest.mark.asyncio +async def test_read_rows_flattened_error_async(): + client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.read_rows( + bigtable.ReadRowsRequest(), + table_name="table_name_value", + app_profile_id="app_profile_id_value", + ) + + +def test_sample_row_keys( + transport: str = "grpc", request_type=bigtable.SampleRowKeysRequest +): + client = BigtableClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = iter([bigtable.SampleRowKeysResponse()]) + + response = client.sample_row_keys(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable.SampleRowKeysRequest() + + # Establish that the response is the type that we expect. + for message in response: + assert isinstance(message, bigtable.SampleRowKeysResponse) + + +def test_sample_row_keys_from_dict(): + test_sample_row_keys(request_type=dict) + + +@pytest.mark.asyncio +async def test_sample_row_keys_async( + transport: str = "grpc_asyncio", request_type=bigtable.SampleRowKeysRequest +): + client = BigtableAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
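+    # Illustrative note: proto3 gives every field a default value, so an
+    # empty request is always well formed, e.g.:
+    #
+    #   req = bigtable.SampleRowKeysRequest()
+    #   assert req.table_name == ""       # unset string fields read as ""
+    #   assert req.app_profile_id == ""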
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.SampleRowKeysResponse()] + ) + + response = await client.sample_row_keys(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable.SampleRowKeysRequest() + + # Establish that the response is the type that we expect. + message = await response.read() + assert isinstance(message, bigtable.SampleRowKeysResponse) + + +@pytest.mark.asyncio +async def test_sample_row_keys_async_from_dict(): + await test_sample_row_keys_async(request_type=dict) + + +def test_sample_row_keys_field_headers(): + client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable.SampleRowKeysRequest() + request.table_name = "table_name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: + call.return_value = iter([bigtable.SampleRowKeysResponse()]) + + client.sample_row_keys(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "table_name=table_name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_sample_row_keys_field_headers_async(): + client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable.SampleRowKeysRequest() + request.table_name = "table_name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.SampleRowKeysResponse()] + ) + + await client.sample_row_keys(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "table_name=table_name/value",) in kw["metadata"] + + +def test_sample_row_keys_flattened(): + client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = iter([bigtable.SampleRowKeysResponse()]) + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
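+        # Illustrative note: the client folds these keyword arguments into a
+        # single SampleRowKeysRequest before invoking the transport, which is
+        # why the assertions below read attributes off ``args[0]`` rather
+        # than separate positional arguments.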
+ client.sample_row_keys( + table_name="table_name_value", app_profile_id="app_profile_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].table_name == "table_name_value" + + assert args[0].app_profile_id == "app_profile_id_value" + + +def test_sample_row_keys_flattened_error(): + client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.sample_row_keys( + bigtable.SampleRowKeysRequest(), + table_name="table_name_value", + app_profile_id="app_profile_id_value", + ) + + +@pytest.mark.asyncio +async def test_sample_row_keys_flattened_async(): + client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = iter([bigtable.SampleRowKeysResponse()]) + + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.sample_row_keys( + table_name="table_name_value", app_profile_id="app_profile_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].table_name == "table_name_value" + + assert args[0].app_profile_id == "app_profile_id_value" + + +@pytest.mark.asyncio +async def test_sample_row_keys_flattened_error_async(): + client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.sample_row_keys( + bigtable.SampleRowKeysRequest(), + table_name="table_name_value", + app_profile_id="app_profile_id_value", + ) + + +def test_mutate_row(transport: str = "grpc", request_type=bigtable.MutateRowRequest): + client = BigtableClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable.MutateRowResponse() + + response = client.mutate_row(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable.MutateRowRequest() + + # Establish that the response is the type that we expect. 
+ + assert isinstance(response, bigtable.MutateRowResponse) + + +def test_mutate_row_from_dict(): + test_mutate_row(request_type=dict) + + +@pytest.mark.asyncio +async def test_mutate_row_async( + transport: str = "grpc_asyncio", request_type=bigtable.MutateRowRequest +): + client = BigtableAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.MutateRowResponse() + ) + + response = await client.mutate_row(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable.MutateRowRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable.MutateRowResponse) + + +@pytest.mark.asyncio +async def test_mutate_row_async_from_dict(): + await test_mutate_row_async(request_type=dict) + + +def test_mutate_row_field_headers(): + client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable.MutateRowRequest() + request.table_name = "table_name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: + call.return_value = bigtable.MutateRowResponse() + + client.mutate_row(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "table_name=table_name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_mutate_row_field_headers_async(): + client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable.MutateRowRequest() + request.table_name = "table_name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.MutateRowResponse() + ) + + await client.mutate_row(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "table_name=table_name/value",) in kw["metadata"] + + +def test_mutate_row_flattened(): + client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: + # Designate an appropriate return value for the call. 
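+        # Illustrative note: MutateRow is unary, so a single canned response
+        # suffices; the streaming RPCs above stub the call with ``iter([...])``
+        # to mimic a server-streaming reply instead.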
+ call.return_value = bigtable.MutateRowResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.mutate_row( + table_name="table_name_value", + row_key=b"row_key_blob", + mutations=[ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ], + app_profile_id="app_profile_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].table_name == "table_name_value" + + assert args[0].row_key == b"row_key_blob" + + assert args[0].mutations == [ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ] + + assert args[0].app_profile_id == "app_profile_id_value" + + +def test_mutate_row_flattened_error(): + client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.mutate_row( + bigtable.MutateRowRequest(), + table_name="table_name_value", + row_key=b"row_key_blob", + mutations=[ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ], + app_profile_id="app_profile_id_value", + ) + + +@pytest.mark.asyncio +async def test_mutate_row_flattened_async(): + client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable.MutateRowResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.MutateRowResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.mutate_row( + table_name="table_name_value", + row_key=b"row_key_blob", + mutations=[ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ], + app_profile_id="app_profile_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].table_name == "table_name_value" + + assert args[0].row_key == b"row_key_blob" + + assert args[0].mutations == [ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ] + + assert args[0].app_profile_id == "app_profile_id_value" + + +@pytest.mark.asyncio +async def test_mutate_row_flattened_error_async(): + client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
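+    # Illustrative note: accepting both calling conventions at once would be
+    # ambiguous about which value wins, so the generated client refuses the
+    # mix with a ValueError rather than merging them silently.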
+ with pytest.raises(ValueError): + await client.mutate_row( + bigtable.MutateRowRequest(), + table_name="table_name_value", + row_key=b"row_key_blob", + mutations=[ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ], + app_profile_id="app_profile_id_value", + ) + + +def test_mutate_rows(transport: str = "grpc", request_type=bigtable.MutateRowsRequest): + client = BigtableClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = iter([bigtable.MutateRowsResponse()]) + + response = client.mutate_rows(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable.MutateRowsRequest() + + # Establish that the response is the type that we expect. + for message in response: + assert isinstance(message, bigtable.MutateRowsResponse) + + +def test_mutate_rows_from_dict(): + test_mutate_rows(request_type=dict) + + +@pytest.mark.asyncio +async def test_mutate_rows_async( + transport: str = "grpc_asyncio", request_type=bigtable.MutateRowsRequest +): + client = BigtableAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.MutateRowsResponse()] + ) + + response = await client.mutate_rows(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable.MutateRowsRequest() + + # Establish that the response is the type that we expect. + message = await response.read() + assert isinstance(message, bigtable.MutateRowsResponse) + + +@pytest.mark.asyncio +async def test_mutate_rows_async_from_dict(): + await test_mutate_rows_async(request_type=dict) + + +def test_mutate_rows_field_headers(): + client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable.MutateRowsRequest() + request.table_name = "table_name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: + call.return_value = iter([bigtable.MutateRowsResponse()]) + + client.mutate_rows(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
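+    # Illustrative note: ``x-goog-request-params`` is the routing metadata
+    # Google frontends use to route a request by resource name; the client
+    # mirrors ``request.table_name`` into it, so the expected entry is:
+    #
+    #   ("x-goog-request-params", "table_name=table_name/value")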
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "table_name=table_name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_mutate_rows_field_headers_async(): + client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable.MutateRowsRequest() + request.table_name = "table_name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.MutateRowsResponse()] + ) + + await client.mutate_rows(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "table_name=table_name/value",) in kw["metadata"] + + +def test_mutate_rows_flattened(): + client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = iter([bigtable.MutateRowsResponse()]) + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.mutate_rows( + table_name="table_name_value", + entries=[bigtable.MutateRowsRequest.Entry(row_key=b"row_key_blob")], + app_profile_id="app_profile_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].table_name == "table_name_value" + + assert args[0].entries == [ + bigtable.MutateRowsRequest.Entry(row_key=b"row_key_blob") + ] + + assert args[0].app_profile_id == "app_profile_id_value" + + +def test_mutate_rows_flattened_error(): + client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.mutate_rows( + bigtable.MutateRowsRequest(), + table_name="table_name_value", + entries=[bigtable.MutateRowsRequest.Entry(row_key=b"row_key_blob")], + app_profile_id="app_profile_id_value", + ) + + +@pytest.mark.asyncio +async def test_mutate_rows_flattened_async(): + client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = iter([bigtable.MutateRowsResponse()]) + + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.mutate_rows( + table_name="table_name_value", + entries=[bigtable.MutateRowsRequest.Entry(row_key=b"row_key_blob")], + app_profile_id="app_profile_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].table_name == "table_name_value" + + assert args[0].entries == [ + bigtable.MutateRowsRequest.Entry(row_key=b"row_key_blob") + ] + + assert args[0].app_profile_id == "app_profile_id_value" + + +@pytest.mark.asyncio +async def test_mutate_rows_flattened_error_async(): + client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.mutate_rows( + bigtable.MutateRowsRequest(), + table_name="table_name_value", + entries=[bigtable.MutateRowsRequest.Entry(row_key=b"row_key_blob")], + app_profile_id="app_profile_id_value", + ) + + +def test_check_and_mutate_row( + transport: str = "grpc", request_type=bigtable.CheckAndMutateRowRequest +): + client = BigtableClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.check_and_mutate_row), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable.CheckAndMutateRowResponse(predicate_matched=True,) + + response = client.check_and_mutate_row(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable.CheckAndMutateRowRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, bigtable.CheckAndMutateRowResponse) + + assert response.predicate_matched is True + + +def test_check_and_mutate_row_from_dict(): + test_check_and_mutate_row(request_type=dict) + + +@pytest.mark.asyncio +async def test_check_and_mutate_row_async( + transport: str = "grpc_asyncio", request_type=bigtable.CheckAndMutateRowRequest +): + client = BigtableAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.check_and_mutate_row), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.CheckAndMutateRowResponse(predicate_matched=True,) + ) + + response = await client.check_and_mutate_row(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable.CheckAndMutateRowRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable.CheckAndMutateRowResponse) + + assert response.predicate_matched is True + + +@pytest.mark.asyncio +async def test_check_and_mutate_row_async_from_dict(): + await test_check_and_mutate_row_async(request_type=dict) + + +def test_check_and_mutate_row_field_headers(): + client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. 
Set these to a non-empty value. + request = bigtable.CheckAndMutateRowRequest() + request.table_name = "table_name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.check_and_mutate_row), "__call__" + ) as call: + call.return_value = bigtable.CheckAndMutateRowResponse() + + client.check_and_mutate_row(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "table_name=table_name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_check_and_mutate_row_field_headers_async(): + client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable.CheckAndMutateRowRequest() + request.table_name = "table_name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.check_and_mutate_row), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.CheckAndMutateRowResponse() + ) + + await client.check_and_mutate_row(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "table_name=table_name/value",) in kw["metadata"] + + +def test_check_and_mutate_row_flattened(): + client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.check_and_mutate_row), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable.CheckAndMutateRowResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.check_and_mutate_row( + table_name="table_name_value", + row_key=b"row_key_blob", + predicate_filter=data.RowFilter( + chain=data.RowFilter.Chain( + filters=[ + data.RowFilter( + chain=data.RowFilter.Chain( + filters=[data.RowFilter(chain=None)] + ) + ) + ] + ) + ), + true_mutations=[ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ], + false_mutations=[ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ], + app_profile_id="app_profile_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
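+        # Illustrative note: the assertions below simply round-trip the deeply
+        # nested RowFilter chain; that nesting is structural test data, not a
+        # meaningful Bigtable filter.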
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].table_name == "table_name_value" + + assert args[0].row_key == b"row_key_blob" + + assert args[0].predicate_filter == data.RowFilter( + chain=data.RowFilter.Chain( + filters=[ + data.RowFilter( + chain=data.RowFilter.Chain(filters=[data.RowFilter(chain=None)]) + ) + ] + ) + ) + + assert args[0].true_mutations == [ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ] + + assert args[0].false_mutations == [ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ] + + assert args[0].app_profile_id == "app_profile_id_value" + + +def test_check_and_mutate_row_flattened_error(): + client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.check_and_mutate_row( + bigtable.CheckAndMutateRowRequest(), + table_name="table_name_value", + row_key=b"row_key_blob", + predicate_filter=data.RowFilter( + chain=data.RowFilter.Chain( + filters=[ + data.RowFilter( + chain=data.RowFilter.Chain( + filters=[data.RowFilter(chain=None)] + ) + ) + ] + ) + ), + true_mutations=[ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ], + false_mutations=[ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ], + app_profile_id="app_profile_id_value", + ) + + +@pytest.mark.asyncio +async def test_check_and_mutate_row_flattened_async(): + client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.check_and_mutate_row), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable.CheckAndMutateRowResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.CheckAndMutateRowResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.check_and_mutate_row( + table_name="table_name_value", + row_key=b"row_key_blob", + predicate_filter=data.RowFilter( + chain=data.RowFilter.Chain( + filters=[ + data.RowFilter( + chain=data.RowFilter.Chain( + filters=[data.RowFilter(chain=None)] + ) + ) + ] + ) + ), + true_mutations=[ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ], + false_mutations=[ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ], + app_profile_id="app_profile_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].table_name == "table_name_value" + + assert args[0].row_key == b"row_key_blob" + + assert args[0].predicate_filter == data.RowFilter( + chain=data.RowFilter.Chain( + filters=[ + data.RowFilter( + chain=data.RowFilter.Chain(filters=[data.RowFilter(chain=None)]) + ) + ] + ) + ) + + assert args[0].true_mutations == [ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ] + + assert args[0].false_mutations == [ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ] + + assert args[0].app_profile_id == "app_profile_id_value" + + +@pytest.mark.asyncio +async def test_check_and_mutate_row_flattened_error_async(): + client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.check_and_mutate_row( + bigtable.CheckAndMutateRowRequest(), + table_name="table_name_value", + row_key=b"row_key_blob", + predicate_filter=data.RowFilter( + chain=data.RowFilter.Chain( + filters=[ + data.RowFilter( + chain=data.RowFilter.Chain( + filters=[data.RowFilter(chain=None)] + ) + ) + ] + ) + ), + true_mutations=[ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ], + false_mutations=[ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ], + app_profile_id="app_profile_id_value", + ) + + +def test_read_modify_write_row( + transport: str = "grpc", request_type=bigtable.ReadModifyWriteRowRequest +): + client = BigtableClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_modify_write_row), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable.ReadModifyWriteRowResponse() + + response = client.read_modify_write_row(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable.ReadModifyWriteRowRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, bigtable.ReadModifyWriteRowResponse) + + +def test_read_modify_write_row_from_dict(): + test_read_modify_write_row(request_type=dict) + + +@pytest.mark.asyncio +async def test_read_modify_write_row_async( + transport: str = "grpc_asyncio", request_type=bigtable.ReadModifyWriteRowRequest +): + client = BigtableAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_modify_write_row), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
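+        # Illustrative note: grpc_helpers_async.FakeUnaryUnaryCall wraps the
+        # canned response in an object that can be awaited like a real aio
+        # unary-unary call, so the async client code path runs unchanged.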
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.ReadModifyWriteRowResponse() + ) + + response = await client.read_modify_write_row(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable.ReadModifyWriteRowRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable.ReadModifyWriteRowResponse) + + +@pytest.mark.asyncio +async def test_read_modify_write_row_async_from_dict(): + await test_read_modify_write_row_async(request_type=dict) + + +def test_read_modify_write_row_field_headers(): + client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable.ReadModifyWriteRowRequest() + request.table_name = "table_name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_modify_write_row), "__call__" + ) as call: + call.return_value = bigtable.ReadModifyWriteRowResponse() + + client.read_modify_write_row(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "table_name=table_name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_read_modify_write_row_field_headers_async(): + client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable.ReadModifyWriteRowRequest() + request.table_name = "table_name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_modify_write_row), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.ReadModifyWriteRowResponse() + ) + + await client.read_modify_write_row(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "table_name=table_name/value",) in kw["metadata"] + + +def test_read_modify_write_row_flattened(): + client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_modify_write_row), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable.ReadModifyWriteRowResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.read_modify_write_row( + table_name="table_name_value", + row_key=b"row_key_blob", + rules=[data.ReadModifyWriteRule(family_name="family_name_value")], + app_profile_id="app_profile_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].table_name == "table_name_value" + + assert args[0].row_key == b"row_key_blob" + + assert args[0].rules == [ + data.ReadModifyWriteRule(family_name="family_name_value") + ] + + assert args[0].app_profile_id == "app_profile_id_value" + + +def test_read_modify_write_row_flattened_error(): + client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.read_modify_write_row( + bigtable.ReadModifyWriteRowRequest(), + table_name="table_name_value", + row_key=b"row_key_blob", + rules=[data.ReadModifyWriteRule(family_name="family_name_value")], + app_profile_id="app_profile_id_value", + ) + + +@pytest.mark.asyncio +async def test_read_modify_write_row_flattened_async(): + client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_modify_write_row), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable.ReadModifyWriteRowResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.ReadModifyWriteRowResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.read_modify_write_row( + table_name="table_name_value", + row_key=b"row_key_blob", + rules=[data.ReadModifyWriteRule(family_name="family_name_value")], + app_profile_id="app_profile_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].table_name == "table_name_value" + + assert args[0].row_key == b"row_key_blob" + + assert args[0].rules == [ + data.ReadModifyWriteRule(family_name="family_name_value") + ] + + assert args[0].app_profile_id == "app_profile_id_value" + + +@pytest.mark.asyncio +async def test_read_modify_write_row_flattened_error_async(): + client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.read_modify_write_row( + bigtable.ReadModifyWriteRowRequest(), + table_name="table_name_value", + row_key=b"row_key_blob", + rules=[data.ReadModifyWriteRule(family_name="family_name_value")], + app_profile_id="app_profile_id_value", + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.BigtableGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.BigtableGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. 
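+    # Illustrative note: a ready-made transport already owns its channel,
+    # credentials and scopes, so any client option that would reshape the
+    # channel (credentials, credentials_file, scopes) conflicts with it and
+    # raises ValueError, as each case in this test demonstrates.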
+ transport = transports.BigtableGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableClient( + client_options={"scopes": ["1", "2"]}, transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.BigtableGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + client = BigtableClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.BigtableGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.BigtableGrpcAsyncIOTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [transports.BigtableGrpcTransport, transports.BigtableGrpcAsyncIOTransport,], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + assert isinstance(client.transport, transports.BigtableGrpcTransport,) + + +def test_bigtable_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(exceptions.DuplicateCredentialArgs): + transport = transports.BigtableTransport( + credentials=credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_bigtable_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.cloud.bigtable_v2.services.bigtable.transports.BigtableTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.BigtableTransport( + credentials=credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
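+    # (The base class only declares the RPC surface; the concrete gRPC
+    # and gRPC-asyncio transports override each of these methods, so
+    # invoking one on the abstract base has no implementation to
+    # dispatch to.)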
+ methods = ( + "read_rows", + "sample_row_keys", + "mutate_row", + "mutate_rows", + "check_and_mutate_row", + "read_modify_write_row", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + +def test_bigtable_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + auth, "load_credentials_from_file" + ) as load_creds, mock.patch( + "google.cloud.bigtable_v2.services.bigtable.transports.BigtableTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.BigtableTransport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=( + "https://www.googleapis.com/auth/bigtable.data", + "https://www.googleapis.com/auth/bigtable.data.readonly", + "https://www.googleapis.com/auth/cloud-bigtable.data", + "https://www.googleapis.com/auth/cloud-bigtable.data.readonly", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + quota_project_id="octopus", + ) + + +def test_bigtable_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(auth, "default") as adc, mock.patch( + "google.cloud.bigtable_v2.services.bigtable.transports.BigtableTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.BigtableTransport() + adc.assert_called_once() + + +def test_bigtable_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + BigtableClient() + adc.assert_called_once_with( + scopes=( + "https://www.googleapis.com/auth/bigtable.data", + "https://www.googleapis.com/auth/bigtable.data.readonly", + "https://www.googleapis.com/auth/cloud-bigtable.data", + "https://www.googleapis.com/auth/cloud-bigtable.data.readonly", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + quota_project_id=None, + ) + + +def test_bigtable_transport_auth_adc(): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transports.BigtableGrpcTransport( + host="squid.clam.whelk", quota_project_id="octopus" + ) + adc.assert_called_once_with( + scopes=( + "https://www.googleapis.com/auth/bigtable.data", + "https://www.googleapis.com/auth/bigtable.data.readonly", + "https://www.googleapis.com/auth/cloud-bigtable.data", + "https://www.googleapis.com/auth/cloud-bigtable.data.readonly", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [transports.BigtableGrpcTransport, transports.BigtableGrpcAsyncIOTransport], +) +def test_bigtable_grpc_transport_client_cert_source_for_mtls(transport_class): + cred = credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. 
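+    # (Precedence: an explicit ssl_channel_credentials object is used
+    # verbatim; only when it is absent does the transport build channel
+    # credentials from the client_cert_source_for_mtls callback, roughly:
+    #
+    #     cert, key = client_cert_source_callback()
+    #     ssl_creds = grpc.ssl_channel_credentials(
+    #         certificate_chain=cert, private_key=key
+    #     )
+    #
+    # as the second block below verifies.)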
+    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
+        mock_ssl_channel_creds = mock.Mock()
+        transport_class(
+            host="squid.clam.whelk",
+            credentials=cred,
+            ssl_channel_credentials=mock_ssl_channel_creds,
+        )
+        mock_create_channel.assert_called_once_with(
+            "squid.clam.whelk:443",
+            credentials=cred,
+            credentials_file=None,
+            scopes=(
+                "https://www.googleapis.com/auth/bigtable.data",
+                "https://www.googleapis.com/auth/bigtable.data.readonly",
+                "https://www.googleapis.com/auth/cloud-bigtable.data",
+                "https://www.googleapis.com/auth/cloud-bigtable.data.readonly",
+                "https://www.googleapis.com/auth/cloud-platform",
+                "https://www.googleapis.com/auth/cloud-platform.read-only",
+            ),
+            ssl_credentials=mock_ssl_channel_creds,
+            quota_project_id=None,
+            options=[
+                ("grpc.max_send_message_length", -1),
+                ("grpc.max_receive_message_length", -1),
+            ],
+        )
+
+    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
+    # is used.
+    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
+        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
+            transport_class(
+                credentials=cred,
+                client_cert_source_for_mtls=client_cert_source_callback,
+            )
+            expected_cert, expected_key = client_cert_source_callback()
+            mock_ssl_cred.assert_called_once_with(
+                certificate_chain=expected_cert, private_key=expected_key
+            )
+
+
+def test_bigtable_host_no_port():
+    client = BigtableClient(
+        credentials=credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(
+            api_endpoint="bigtable.googleapis.com"
+        ),
+    )
+    assert client.transport._host == "bigtable.googleapis.com:443"
+
+
+def test_bigtable_host_with_port():
+    client = BigtableClient(
+        credentials=credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(
+            api_endpoint="bigtable.googleapis.com:8000"
+        ),
+    )
+    assert client.transport._host == "bigtable.googleapis.com:8000"
+
+
+def test_bigtable_grpc_transport_channel():
+    channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.BigtableGrpcTransport(
+        host="squid.clam.whelk", channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+def test_bigtable_grpc_asyncio_transport_channel():
+    channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.BigtableGrpcAsyncIOTransport(
+        host="squid.clam.whelk", channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
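+# (These deprecated arguments still work but raise a DeprecationWarning,
+# which the test below asserts via pytest.warns; ssl_channel_credentials
+# and client_cert_source_for_mtls, exercised above, are their replacements.)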
+@pytest.mark.parametrize( + "transport_class", + [transports.BigtableGrpcTransport, transports.BigtableGrpcAsyncIOTransport], +) +def test_bigtable_transport_channel_mtls_with_client_cert_source(transport_class): + with mock.patch( + "grpc.ssl_channel_credentials", autospec=True + ) as grpc_ssl_channel_cred: + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(auth, "default") as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=( + "https://www.googleapis.com/auth/bigtable.data", + "https://www.googleapis.com/auth/bigtable.data.readonly", + "https://www.googleapis.com/auth/cloud-bigtable.data", + "https://www.googleapis.com/auth/cloud-bigtable.data.readonly", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. 
+@pytest.mark.parametrize( + "transport_class", + [transports.BigtableGrpcTransport, transports.BigtableGrpcAsyncIOTransport], +) +def test_bigtable_transport_channel_mtls_with_adc(transport_class): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=( + "https://www.googleapis.com/auth/bigtable.data", + "https://www.googleapis.com/auth/bigtable.data.readonly", + "https://www.googleapis.com/auth/cloud-bigtable.data", + "https://www.googleapis.com/auth/cloud-bigtable.data.readonly", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_table_path(): + project = "squid" + instance = "clam" + table = "whelk" + + expected = "projects/{project}/instances/{instance}/tables/{table}".format( + project=project, instance=instance, table=table, + ) + actual = BigtableClient.table_path(project, instance, table) + assert expected == actual + + +def test_parse_table_path(): + expected = { + "project": "octopus", + "instance": "oyster", + "table": "nudibranch", + } + path = BigtableClient.table_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableClient.parse_table_path(path) + assert expected == actual + + +def test_common_billing_account_path(): + billing_account = "cuttlefish" + + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = BigtableClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "mussel", + } + path = BigtableClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableClient.parse_common_billing_account_path(path) + assert expected == actual + + +def test_common_folder_path(): + folder = "winkle" + + expected = "folders/{folder}".format(folder=folder,) + actual = BigtableClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "nautilus", + } + path = BigtableClient.common_folder_path(**expected) + + # Check that the path construction is reversible. 
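+    # (The *_path builders and their parse_* counterparts are inverses,
+    # e.g. common_folder_path("nautilus") -> "folders/nautilus" and
+    # parse_common_folder_path("folders/nautilus") -> {"folder": "nautilus"}.)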
+ actual = BigtableClient.parse_common_folder_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "scallop" + + expected = "organizations/{organization}".format(organization=organization,) + actual = BigtableClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "abalone", + } + path = BigtableClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableClient.parse_common_organization_path(path) + assert expected == actual + + +def test_common_project_path(): + project = "squid" + + expected = "projects/{project}".format(project=project,) + actual = BigtableClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "clam", + } + path = BigtableClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableClient.parse_common_project_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "whelk" + location = "octopus" + + expected = "projects/{project}/locations/{location}".format( + project=project, location=location, + ) + actual = BigtableClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + } + path = BigtableClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.BigtableTransport, "_prep_wrapped_messages" + ) as prep: + client = BigtableClient( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.BigtableTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = BigtableClient.get_transport_class() + transport = transport_class( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/tests/unit/gapic/v2/test_bigtable_client_v2.py b/tests/unit/gapic/v2/test_bigtable_client_v2.py deleted file mode 100644 index 84abfecef..000000000 --- a/tests/unit/gapic/v2/test_bigtable_client_v2.py +++ /dev/null @@ -1,316 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Unit tests.""" - -import mock -import pytest - -from google.cloud import bigtable_v2 -from google.cloud.bigtable_v2.proto import bigtable_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - def unary_stream(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestBigtableClient(object): - def test_read_rows(self): - # Setup Expected Response - last_scanned_row_key = b"-126" - expected_response = {"last_scanned_row_key": last_scanned_row_key} - expected_response = bigtable_pb2.ReadRowsResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[iter([expected_response])]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup Request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - response = client.read_rows(table_name) - resources = list(response) - assert len(resources) == 1 - assert expected_response == resources[0] - - assert len(channel.requests) == 1 - expected_request = bigtable_pb2.ReadRowsRequest(table_name=table_name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_read_rows_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - with pytest.raises(CustomException): - client.read_rows(table_name) - - def test_sample_row_keys(self): - # Setup Expected Response - row_key = b"122" - offset_bytes = 889884095 - expected_response = {"row_key": row_key, "offset_bytes": offset_bytes} - expected_response = bigtable_pb2.SampleRowKeysResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[iter([expected_response])]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup Request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - response = client.sample_row_keys(table_name) - resources = list(response) - assert len(resources) == 1 - assert expected_response == resources[0] - - assert len(channel.requests) == 1 - expected_request = bigtable_pb2.SampleRowKeysRequest(table_name=table_name) - actual_request = channel.requests[0][1] - assert expected_request == 
actual_request - - def test_sample_row_keys_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - with pytest.raises(CustomException): - client.sample_row_keys(table_name) - - def test_mutate_row(self): - # Setup Expected Response - expected_response = {} - expected_response = bigtable_pb2.MutateRowResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup Request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - row_key = b"122" - mutations = [] - - response = client.mutate_row(table_name, row_key, mutations) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_pb2.MutateRowRequest( - table_name=table_name, row_key=row_key, mutations=mutations - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_mutate_row_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - row_key = b"122" - mutations = [] - - with pytest.raises(CustomException): - client.mutate_row(table_name, row_key, mutations) - - def test_mutate_rows(self): - # Setup Expected Response - expected_response = {} - expected_response = bigtable_pb2.MutateRowsResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[iter([expected_response])]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup Request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - entries = [] - - response = client.mutate_rows(table_name, entries) - resources = list(response) - assert len(resources) == 1 - assert expected_response == resources[0] - - assert len(channel.requests) == 1 - expected_request = bigtable_pb2.MutateRowsRequest( - table_name=table_name, entries=entries - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_mutate_rows_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - entries = [] - - with pytest.raises(CustomException): - client.mutate_rows(table_name, entries) - - def test_check_and_mutate_row(self): - # Setup Expected Response - predicate_matched = True - expected_response = {"predicate_matched": predicate_matched} - expected_response = bigtable_pb2.CheckAndMutateRowResponse(**expected_response) 
- - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup Request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - row_key = b"122" - - response = client.check_and_mutate_row(table_name, row_key) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_pb2.CheckAndMutateRowRequest( - table_name=table_name, row_key=row_key - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_check_and_mutate_row_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - row_key = b"122" - - with pytest.raises(CustomException): - client.check_and_mutate_row(table_name, row_key) - - def test_read_modify_write_row(self): - # Setup Expected Response - expected_response = {} - expected_response = bigtable_pb2.ReadModifyWriteRowResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup Request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - row_key = b"122" - rules = [] - - response = client.read_modify_write_row(table_name, row_key, rules) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_pb2.ReadModifyWriteRowRequest( - table_name=table_name, row_key=row_key, rules=rules - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_read_modify_write_row_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - row_key = b"122" - rules = [] - - with pytest.raises(CustomException): - client.read_modify_write_row(table_name, row_key, rules) diff --git a/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py b/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py deleted file mode 100644 index df083406b..000000000 --- a/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py +++ /dev/null @@ -1,924 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -"""Unit tests.""" - -import mock -import pytest - -from google.rpc import status_pb2 - -from google.cloud import bigtable_admin_v2 -from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2 -from google.cloud.bigtable_admin_v2.proto import instance_pb2 -from google.iam.v1 import iam_policy_pb2 -from google.iam.v1 import policy_pb2 -from google.longrunning import operations_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestBigtableInstanceAdminClient(object): - def test_create_instance(self): - # Setup Expected Response - name = "name3373707" - display_name = "displayName1615086568" - expected_response = {"name": name, "display_name": display_name} - expected_response = instance_pb2.Instance(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_create_instance", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - parent = client.project_path("[PROJECT]") - instance_id = "instanceId-2101995259" - instance = {} - clusters = {} - - response = client.create_instance(parent, instance_id, instance, clusters) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.CreateInstanceRequest( - parent=parent, instance_id=instance_id, instance=instance, clusters=clusters - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_instance_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_create_instance_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - parent = client.project_path("[PROJECT]") - instance_id = "instanceId-2101995259" - instance = {} - clusters = {} - - response = client.create_instance(parent, instance_id, instance, clusters) - exception = response.exception() - assert exception.errors[0] == error - - 
def test_get_instance(self): - # Setup Expected Response - name_2 = "name2-1052831874" - display_name = "displayName1615086568" - expected_response = {"name": name_2, "display_name": display_name} - expected_response = instance_pb2.Instance(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - name = client.instance_path("[PROJECT]", "[INSTANCE]") - - response = client.get_instance(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.GetInstanceRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_instance_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - name = client.instance_path("[PROJECT]", "[INSTANCE]") - - with pytest.raises(CustomException): - client.get_instance(name) - - def test_list_instances(self): - # Setup Expected Response - next_page_token = "nextPageToken-1530815211" - expected_response = {"next_page_token": next_page_token} - expected_response = bigtable_instance_admin_pb2.ListInstancesResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - parent = client.project_path("[PROJECT]") - - response = client.list_instances(parent) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.ListInstancesRequest( - parent=parent - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_instances_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - parent = client.project_path("[PROJECT]") - - with pytest.raises(CustomException): - client.list_instances(parent) - - def test_update_instance(self): - # Setup Expected Response - name = "name3373707" - display_name_2 = "displayName21615000987" - expected_response = {"name": name, "display_name": display_name_2} - expected_response = instance_pb2.Instance(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - display_name = "displayName1615086568" - - response = client.update_instance(display_name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = instance_pb2.Instance(display_name=display_name) 
- actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_instance_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - display_name = "displayName1615086568" - - with pytest.raises(CustomException): - client.update_instance(display_name) - - def test_partial_update_instance(self): - # Setup Expected Response - name = "name3373707" - display_name = "displayName1615086568" - expected_response = {"name": name, "display_name": display_name} - expected_response = instance_pb2.Instance(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_partial_update_instance", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - instance = {} - update_mask = {} - - response = client.partial_update_instance(instance, update_mask) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.PartialUpdateInstanceRequest( - instance=instance, update_mask=update_mask - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_partial_update_instance_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_partial_update_instance_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - instance = {} - update_mask = {} - - response = client.partial_update_instance(instance, update_mask) - exception = response.exception() - assert exception.errors[0] == error - - def test_delete_instance(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - name = client.instance_path("[PROJECT]", "[INSTANCE]") - - client.delete_instance(name) - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.DeleteInstanceRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_instance_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - name = client.instance_path("[PROJECT]", "[INSTANCE]") - - with pytest.raises(CustomException): - client.delete_instance(name) - - def test_create_cluster(self): - # Setup Expected Response - name = "name3373707" - 
location = "location1901043637" - serve_nodes = 1288838783 - expected_response = { - "name": name, - "location": location, - "serve_nodes": serve_nodes, - } - expected_response = instance_pb2.Cluster(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_create_cluster", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - cluster_id = "clusterId240280960" - cluster = {} - - response = client.create_cluster(parent, cluster_id, cluster) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.CreateClusterRequest( - parent=parent, cluster_id=cluster_id, cluster=cluster - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_cluster_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_create_cluster_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - cluster_id = "clusterId240280960" - cluster = {} - - response = client.create_cluster(parent, cluster_id, cluster) - exception = response.exception() - assert exception.errors[0] == error - - def test_get_cluster(self): - # Setup Expected Response - name_2 = "name2-1052831874" - location = "location1901043637" - serve_nodes = 1288838783 - expected_response = { - "name": name_2, - "location": location, - "serve_nodes": serve_nodes, - } - expected_response = instance_pb2.Cluster(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - - response = client.get_cluster(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.GetClusterRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_cluster_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - - with pytest.raises(CustomException): - client.get_cluster(name) - - def test_list_clusters(self): - # Setup Expected Response - next_page_token = "nextPageToken-1530815211" - expected_response = {"next_page_token": next_page_token} - 
expected_response = bigtable_instance_admin_pb2.ListClustersResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - - response = client.list_clusters(parent) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.ListClustersRequest( - parent=parent - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_clusters_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - - with pytest.raises(CustomException): - client.list_clusters(parent) - - def test_update_cluster(self): - # Setup Expected Response - name = "name3373707" - location = "location1901043637" - serve_nodes_2 = 1623486220 - expected_response = { - "name": name, - "location": location, - "serve_nodes": serve_nodes_2, - } - expected_response = instance_pb2.Cluster(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_update_cluster", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - serve_nodes = 1288838783 - - response = client.update_cluster(serve_nodes) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = instance_pb2.Cluster(serve_nodes=serve_nodes) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_cluster_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_update_cluster_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - serve_nodes = 1288838783 - - response = client.update_cluster(serve_nodes) - exception = response.exception() - assert exception.errors[0] == error - - def test_delete_cluster(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - - client.delete_cluster(name) - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.DeleteClusterRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request 
== actual_request - - def test_delete_cluster_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - - with pytest.raises(CustomException): - client.delete_cluster(name) - - def test_create_app_profile(self): - # Setup Expected Response - name = "name3373707" - etag = "etag3123477" - description = "description-1724546052" - expected_response = {"name": name, "etag": etag, "description": description} - expected_response = instance_pb2.AppProfile(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - app_profile_id = "appProfileId1262094415" - app_profile = {} - - response = client.create_app_profile(parent, app_profile_id, app_profile) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.CreateAppProfileRequest( - parent=parent, app_profile_id=app_profile_id, app_profile=app_profile - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_app_profile_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - app_profile_id = "appProfileId1262094415" - app_profile = {} - - with pytest.raises(CustomException): - client.create_app_profile(parent, app_profile_id, app_profile) - - def test_get_app_profile(self): - # Setup Expected Response - name_2 = "name2-1052831874" - etag = "etag3123477" - description = "description-1724546052" - expected_response = {"name": name_2, "etag": etag, "description": description} - expected_response = instance_pb2.AppProfile(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - name = client.app_profile_path("[PROJECT]", "[INSTANCE]", "[APP_PROFILE]") - - response = client.get_app_profile(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.GetAppProfileRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_app_profile_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - name = 
client.app_profile_path("[PROJECT]", "[INSTANCE]", "[APP_PROFILE]") - - with pytest.raises(CustomException): - client.get_app_profile(name) - - def test_list_app_profiles(self): - # Setup Expected Response - next_page_token = "" - app_profiles_element = {} - app_profiles = [app_profiles_element] - expected_response = { - "next_page_token": next_page_token, - "app_profiles": app_profiles, - } - expected_response = bigtable_instance_admin_pb2.ListAppProfilesResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - - paged_list_response = client.list_app_profiles(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.app_profiles[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.ListAppProfilesRequest( - parent=parent - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_app_profiles_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - - paged_list_response = client.list_app_profiles(parent) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_update_app_profile(self): - # Setup Expected Response - name = "name3373707" - etag = "etag3123477" - description = "description-1724546052" - expected_response = {"name": name, "etag": etag, "description": description} - expected_response = instance_pb2.AppProfile(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_update_app_profile", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - app_profile = {} - update_mask = {} - - response = client.update_app_profile(app_profile, update_mask) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.UpdateAppProfileRequest( - app_profile=app_profile, update_mask=update_mask - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_app_profile_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_update_app_profile_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - app_profile = {} - update_mask = {} - - response = 
client.update_app_profile(app_profile, update_mask) - exception = response.exception() - assert exception.errors[0] == error - - def test_delete_app_profile(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - name = client.app_profile_path("[PROJECT]", "[INSTANCE]", "[APP_PROFILE]") - - client.delete_app_profile(name) - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.DeleteAppProfileRequest( - name=name - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_app_profile_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - name = client.app_profile_path("[PROJECT]", "[INSTANCE]", "[APP_PROFILE]") - - with pytest.raises(CustomException): - client.delete_app_profile(name) - - def test_get_iam_policy(self): - # Setup Expected Response - version = 351608024 - etag = b"etag3123477" - expected_response = {"version": version, "etag": etag} - expected_response = policy_pb2.Policy(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - resource = "resource-341064690" - - response = client.get_iam_policy(resource) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = iam_policy_pb2.GetIamPolicyRequest(resource=resource) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_iam_policy_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - resource = "resource-341064690" - - with pytest.raises(CustomException): - client.get_iam_policy(resource) - - def test_set_iam_policy(self): - # Setup Expected Response - version = 351608024 - etag = b"etag3123477" - expected_response = {"version": version, "etag": etag} - expected_response = policy_pb2.Policy(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - resource = "resource-341064690" - policy = {} - - response = client.set_iam_policy(resource, policy) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = iam_policy_pb2.SetIamPolicyRequest( - resource=resource, policy=policy - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_set_iam_policy_exception(self): - # Mock the API response - channel = 
ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - resource = "resource-341064690" - policy = {} - - with pytest.raises(CustomException): - client.set_iam_policy(resource, policy) - - def test_test_iam_permissions(self): - # Setup Expected Response - expected_response = {} - expected_response = iam_policy_pb2.TestIamPermissionsResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - resource = "resource-341064690" - permissions = [] - - response = client.test_iam_permissions(resource, permissions) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = iam_policy_pb2.TestIamPermissionsRequest( - resource=resource, permissions=permissions - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_test_iam_permissions_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - resource = "resource-341064690" - permissions = [] - - with pytest.raises(CustomException): - client.test_iam_permissions(resource, permissions) diff --git a/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py b/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py deleted file mode 100644 index 42db08579..000000000 --- a/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py +++ /dev/null @@ -1,1039 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Unit tests.""" - -import mock -import pytest - -from google.rpc import status_pb2 - -from google.cloud import bigtable_admin_v2 -from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2 -from google.cloud.bigtable_admin_v2.proto import table_pb2 -from google.iam.v1 import iam_policy_pb2 -from google.iam.v1 import policy_pb2 -from google.longrunning import operations_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestBigtableTableAdminClient(object): - def test_create_table(self): - # Setup Expected Response - name = "name3373707" - expected_response = {"name": name} - expected_response = table_pb2.Table(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - table_id = "tableId-895419604" - table = {} - - response = client.create_table(parent, table_id, table) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.CreateTableRequest( - parent=parent, table_id=table_id, table=table - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_table_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - table_id = "tableId-895419604" - table = {} - - with pytest.raises(CustomException): - client.create_table(parent, table_id, table) - - def test_create_table_from_snapshot(self): - # Setup Expected Response - name = "name3373707" - expected_response = {"name": name} - expected_response = table_pb2.Table(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_create_table_from_snapshot", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", 
"[INSTANCE]") - table_id = "tableId-895419604" - source_snapshot = client.snapshot_path( - "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" - ) - - response = client.create_table_from_snapshot(parent, table_id, source_snapshot) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.CreateTableFromSnapshotRequest( - parent=parent, table_id=table_id, source_snapshot=source_snapshot - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_table_from_snapshot_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_create_table_from_snapshot_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - table_id = "tableId-895419604" - source_snapshot = client.snapshot_path( - "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" - ) - - response = client.create_table_from_snapshot(parent, table_id, source_snapshot) - exception = response.exception() - assert exception.errors[0] == error - - def test_list_tables(self): - # Setup Expected Response - next_page_token = "" - tables_element = {} - tables = [tables_element] - expected_response = {"next_page_token": next_page_token, "tables": tables} - expected_response = bigtable_table_admin_pb2.ListTablesResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - - paged_list_response = client.list_tables(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.tables[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.ListTablesRequest(parent=parent) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_tables_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - - paged_list_response = client.list_tables(parent) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_get_table(self): - # Setup Expected Response - name_2 = "name2-1052831874" - expected_response = {"name": name_2} - expected_response = table_pb2.Table(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - 
-        response = client.get_table(name)
-        assert expected_response == response
-
-        assert len(channel.requests) == 1
-        expected_request = bigtable_table_admin_pb2.GetTableRequest(name=name)
-        actual_request = channel.requests[0][1]
-        assert expected_request == actual_request
-
-    def test_get_table_exception(self):
-        # Mock the API response
-        channel = ChannelStub(responses=[CustomException()])
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = bigtable_admin_v2.BigtableTableAdminClient()
-
-        # Setup request
-        name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]")
-
-        with pytest.raises(CustomException):
-            client.get_table(name)
-
-    def test_delete_table(self):
-        channel = ChannelStub()
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = bigtable_admin_v2.BigtableTableAdminClient()
-
-        # Setup Request
-        name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]")
-
-        client.delete_table(name)
-
-        assert len(channel.requests) == 1
-        expected_request = bigtable_table_admin_pb2.DeleteTableRequest(name=name)
-        actual_request = channel.requests[0][1]
-        assert expected_request == actual_request
-
-    def test_delete_table_exception(self):
-        # Mock the API response
-        channel = ChannelStub(responses=[CustomException()])
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = bigtable_admin_v2.BigtableTableAdminClient()
-
-        # Setup request
-        name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]")
-
-        with pytest.raises(CustomException):
-            client.delete_table(name)
-
-    def test_modify_column_families(self):
-        # Setup Expected Response
-        name_2 = "name2-1052831874"
-        expected_response = {"name": name_2}
-        expected_response = table_pb2.Table(**expected_response)
-
-        # Mock the API response
-        channel = ChannelStub(responses=[expected_response])
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = bigtable_admin_v2.BigtableTableAdminClient()
-
-        # Setup Request
-        name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]")
-        modifications = []
-
-        response = client.modify_column_families(name, modifications)
-        assert expected_response == response
-
-        assert len(channel.requests) == 1
-        expected_request = bigtable_table_admin_pb2.ModifyColumnFamiliesRequest(
-            name=name, modifications=modifications
-        )
-        actual_request = channel.requests[0][1]
-        assert expected_request == actual_request
-
-    def test_modify_column_families_exception(self):
-        # Mock the API response
-        channel = ChannelStub(responses=[CustomException()])
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = bigtable_admin_v2.BigtableTableAdminClient()
-
-        # Setup request
-        name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]")
-        modifications = []
-
-        with pytest.raises(CustomException):
-            client.modify_column_families(name, modifications)
-
-    def test_drop_row_range(self):
-        channel = ChannelStub()
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = bigtable_admin_v2.BigtableTableAdminClient()
-
-        # Setup Request
-        name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]")
client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - client.drop_row_range(name) - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.DropRowRangeRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_drop_row_range_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - with pytest.raises(CustomException): - client.drop_row_range(name) - - def test_generate_consistency_token(self): - # Setup Expected Response - consistency_token = "consistencyToken-1090516718" - expected_response = {"consistency_token": consistency_token} - expected_response = bigtable_table_admin_pb2.GenerateConsistencyTokenResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - response = client.generate_consistency_token(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.GenerateConsistencyTokenRequest( - name=name - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_generate_consistency_token_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - with pytest.raises(CustomException): - client.generate_consistency_token(name) - - def test_check_consistency(self): - # Setup Expected Response - consistent = True - expected_response = {"consistent": consistent} - expected_response = bigtable_table_admin_pb2.CheckConsistencyResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - consistency_token = "consistencyToken-1090516718" - - response = client.check_consistency(name, consistency_token) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.CheckConsistencyRequest( - name=name, consistency_token=consistency_token - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_check_consistency_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = 
-
-        # Setup request
-        name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]")
-        consistency_token = "consistencyToken-1090516718"
-
-        with pytest.raises(CustomException):
-            client.check_consistency(name, consistency_token)
-
-    def test_get_iam_policy(self):
-        # Setup Expected Response
-        version = 351608024
-        etag = b"etag3123477"
-        expected_response = {"version": version, "etag": etag}
-        expected_response = policy_pb2.Policy(**expected_response)
-
-        # Mock the API response
-        channel = ChannelStub(responses=[expected_response])
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = bigtable_admin_v2.BigtableTableAdminClient()
-
-        # Setup Request
-        resource = "resource-341064690"
-
-        response = client.get_iam_policy(resource)
-        assert expected_response == response
-
-        assert len(channel.requests) == 1
-        expected_request = iam_policy_pb2.GetIamPolicyRequest(resource=resource)
-        actual_request = channel.requests[0][1]
-        assert expected_request == actual_request
-
-    def test_get_iam_policy_exception(self):
-        # Mock the API response
-        channel = ChannelStub(responses=[CustomException()])
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = bigtable_admin_v2.BigtableTableAdminClient()
-
-        # Setup request
-        resource = "resource-341064690"
-
-        with pytest.raises(CustomException):
-            client.get_iam_policy(resource)
-
-    def test_set_iam_policy(self):
-        # Setup Expected Response
-        version = 351608024
-        etag = b"etag3123477"
-        expected_response = {"version": version, "etag": etag}
-        expected_response = policy_pb2.Policy(**expected_response)
-
-        # Mock the API response
-        channel = ChannelStub(responses=[expected_response])
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = bigtable_admin_v2.BigtableTableAdminClient()
-
-        # Setup Request
-        resource = "resource-341064690"
-        policy = {}
-
-        response = client.set_iam_policy(resource, policy)
-        assert expected_response == response
-
-        assert len(channel.requests) == 1
-        expected_request = iam_policy_pb2.SetIamPolicyRequest(
-            resource=resource, policy=policy
-        )
-        actual_request = channel.requests[0][1]
-        assert expected_request == actual_request
-
-    def test_set_iam_policy_exception(self):
-        # Mock the API response
-        channel = ChannelStub(responses=[CustomException()])
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = bigtable_admin_v2.BigtableTableAdminClient()
-
-        # Setup request
-        resource = "resource-341064690"
-        policy = {}
-
-        with pytest.raises(CustomException):
-            client.set_iam_policy(resource, policy)
-
-    def test_test_iam_permissions(self):
-        # Setup Expected Response
-        expected_response = {}
-        expected_response = iam_policy_pb2.TestIamPermissionsResponse(
-            **expected_response
-        )
-
-        # Mock the API response
-        channel = ChannelStub(responses=[expected_response])
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = bigtable_admin_v2.BigtableTableAdminClient()
-
-        # Setup Request
-        resource = "resource-341064690"
-        permissions = []
-
-        response = client.test_iam_permissions(resource, permissions)
-        assert expected_response == response
-
-        assert len(channel.requests) == 1
-        expected_request = iam_policy_pb2.TestIamPermissionsRequest(
-            resource=resource, permissions=permissions
-        )
-        actual_request = channel.requests[0][1]
-        assert expected_request == actual_request
-
-    def test_test_iam_permissions_exception(self):
-        # Mock the API response
-        channel = ChannelStub(responses=[CustomException()])
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = bigtable_admin_v2.BigtableTableAdminClient()
-
-        # Setup request
-        resource = "resource-341064690"
-        permissions = []
-
-        with pytest.raises(CustomException):
-            client.test_iam_permissions(resource, permissions)
-
-    def test_snapshot_table(self):
-        # Setup Expected Response
-        name_2 = "name2-1052831874"
-        data_size_bytes = 2110122398
-        description = "description-1724546052"
-        expected_response = {
-            "name": name_2,
-            "data_size_bytes": data_size_bytes,
-            "description": description,
-        }
-        expected_response = table_pb2.Snapshot(**expected_response)
-        operation = operations_pb2.Operation(
-            name="operations/test_snapshot_table", done=True
-        )
-        operation.response.Pack(expected_response)
-
-        # Mock the API response
-        channel = ChannelStub(responses=[operation])
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = bigtable_admin_v2.BigtableTableAdminClient()
-
-        # Setup Request
-        name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]")
-        cluster = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]")
-        snapshot_id = "snapshotId-168585866"
-
-        response = client.snapshot_table(name, cluster, snapshot_id)
-        result = response.result()
-        assert expected_response == result
-
-        assert len(channel.requests) == 1
-        expected_request = bigtable_table_admin_pb2.SnapshotTableRequest(
-            name=name, cluster=cluster, snapshot_id=snapshot_id
-        )
-        actual_request = channel.requests[0][1]
-        assert expected_request == actual_request
-
-    def test_snapshot_table_exception(self):
-        # Setup Response
-        error = status_pb2.Status()
-        operation = operations_pb2.Operation(
-            name="operations/test_snapshot_table_exception", done=True
-        )
-        operation.error.CopyFrom(error)
-
-        # Mock the API response
-        channel = ChannelStub(responses=[operation])
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = bigtable_admin_v2.BigtableTableAdminClient()
-
-        # Setup Request
-        name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]")
-        cluster = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]")
-        snapshot_id = "snapshotId-168585866"
-
-        response = client.snapshot_table(name, cluster, snapshot_id)
-        exception = response.exception()
-        assert exception.errors[0] == error
-
-    def test_get_snapshot(self):
-        # Setup Expected Response
-        name_2 = "name2-1052831874"
-        data_size_bytes = 2110122398
-        description = "description-1724546052"
-        expected_response = {
-            "name": name_2,
-            "data_size_bytes": data_size_bytes,
-            "description": description,
-        }
-        expected_response = table_pb2.Snapshot(**expected_response)
-
-        # Mock the API response
-        channel = ChannelStub(responses=[expected_response])
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = bigtable_admin_v2.BigtableTableAdminClient()
-
-        # Setup Request
-        name = client.snapshot_path(
-            "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]"
-        )
-
-        response = client.get_snapshot(name)
-        assert expected_response == response
-
-        assert len(channel.requests) == 1
-        expected_request = bigtable_table_admin_pb2.GetSnapshotRequest(name=name)
-        actual_request = channel.requests[0][1]
-        assert expected_request == actual_request
-
-    def test_get_snapshot_exception(self):
-        # Mock the API response
-        channel = ChannelStub(responses=[CustomException()])
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = bigtable_admin_v2.BigtableTableAdminClient()
-
-        # Setup request
-        name = client.snapshot_path(
-            "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]"
-        )
-
-        with pytest.raises(CustomException):
-            client.get_snapshot(name)
-
-    def test_list_snapshots(self):
-        # Setup Expected Response
-        next_page_token = ""
-        snapshots_element = {}
-        snapshots = [snapshots_element]
-        expected_response = {"next_page_token": next_page_token, "snapshots": snapshots}
-        expected_response = bigtable_table_admin_pb2.ListSnapshotsResponse(
-            **expected_response
-        )
-
-        # Mock the API response
-        channel = ChannelStub(responses=[expected_response])
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = bigtable_admin_v2.BigtableTableAdminClient()
-
-        # Setup Request
-        parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]")
-
-        paged_list_response = client.list_snapshots(parent)
-        resources = list(paged_list_response)
-        assert len(resources) == 1
-
-        assert expected_response.snapshots[0] == resources[0]
-
-        assert len(channel.requests) == 1
-        expected_request = bigtable_table_admin_pb2.ListSnapshotsRequest(parent=parent)
-        actual_request = channel.requests[0][1]
-        assert expected_request == actual_request
-
-    def test_list_snapshots_exception(self):
-        channel = ChannelStub(responses=[CustomException()])
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = bigtable_admin_v2.BigtableTableAdminClient()
-
-        # Setup request
-        parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]")
-
-        paged_list_response = client.list_snapshots(parent)
-        with pytest.raises(CustomException):
-            list(paged_list_response)
-
-    def test_delete_snapshot(self):
-        channel = ChannelStub()
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = bigtable_admin_v2.BigtableTableAdminClient()
-
-        # Setup Request
-        name = client.snapshot_path(
-            "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]"
-        )
-
-        client.delete_snapshot(name)
-
-        assert len(channel.requests) == 1
-        expected_request = bigtable_table_admin_pb2.DeleteSnapshotRequest(name=name)
-        actual_request = channel.requests[0][1]
-        assert expected_request == actual_request
-
-    def test_delete_snapshot_exception(self):
-        # Mock the API response
-        channel = ChannelStub(responses=[CustomException()])
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = bigtable_admin_v2.BigtableTableAdminClient()
-
-        # Setup request
-        name = client.snapshot_path(
-            "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]"
-        )
-
-        with pytest.raises(CustomException):
-            client.delete_snapshot(name)
-
-    def test_create_backup(self):
-        # Setup Expected Response
-        name = "name3373707"
-        source_table = "sourceTable1670858410"
-        size_bytes = 1796325715
-        expected_response = {
-            "name": name,
-            "source_table": source_table,
-            "size_bytes": size_bytes,
-        }
-        expected_response = table_pb2.Backup(**expected_response)
-        operation = operations_pb2.Operation(
-            name="operations/test_create_backup", done=True
-        )
-        operation.response.Pack(expected_response)
-
-        # Mock the API response
-        channel = ChannelStub(responses=[operation])
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = bigtable_admin_v2.BigtableTableAdminClient()
-
-        # Setup Request
-        parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]")
-        backup_id = "backupId1355353272"
-        backup = {}
-
-        response = client.create_backup(parent, backup_id, backup)
-        result = response.result()
-        assert expected_response == result
-
-        assert len(channel.requests) == 1
-        expected_request = bigtable_table_admin_pb2.CreateBackupRequest(
-            parent=parent, backup_id=backup_id, backup=backup
-        )
-        actual_request = channel.requests[0][1]
-        assert expected_request == actual_request
-
-    def test_create_backup_exception(self):
-        # Setup Response
-        error = status_pb2.Status()
-        operation = operations_pb2.Operation(
-            name="operations/test_create_backup_exception", done=True
-        )
-        operation.error.CopyFrom(error)
-
-        # Mock the API response
-        channel = ChannelStub(responses=[operation])
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = bigtable_admin_v2.BigtableTableAdminClient()
-
-        # Setup Request
-        parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]")
-        backup_id = "backupId1355353272"
-        backup = {}
-
-        response = client.create_backup(parent, backup_id, backup)
-        exception = response.exception()
-        assert exception.errors[0] == error
-
-    def test_get_backup(self):
-        # Setup Expected Response
-        name_2 = "name2-1052831874"
-        source_table = "sourceTable1670858410"
-        size_bytes = 1796325715
-        expected_response = {
-            "name": name_2,
-            "source_table": source_table,
-            "size_bytes": size_bytes,
-        }
-        expected_response = table_pb2.Backup(**expected_response)
-
-        # Mock the API response
-        channel = ChannelStub(responses=[expected_response])
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = bigtable_admin_v2.BigtableTableAdminClient()
-
-        # Setup Request
-        name = client.backup_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[BACKUP]")
-
-        response = client.get_backup(name)
-        assert expected_response == response
-
-        assert len(channel.requests) == 1
-        expected_request = bigtable_table_admin_pb2.GetBackupRequest(name=name)
-        actual_request = channel.requests[0][1]
-        assert expected_request == actual_request
-
-    def test_get_backup_exception(self):
-        # Mock the API response
-        channel = ChannelStub(responses=[CustomException()])
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = bigtable_admin_v2.BigtableTableAdminClient()
-
-        # Setup request
-        name = client.backup_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[BACKUP]")
-
-        with pytest.raises(CustomException):
-            client.get_backup(name)
-
-    def test_list_backups(self):
-        # Setup Expected Response
-        next_page_token = ""
-        backups_element = {}
-        backups = [backups_element]
-        expected_response = {"next_page_token": next_page_token, "backups": backups}
-        expected_response = bigtable_table_admin_pb2.ListBackupsResponse(
-            **expected_response
-        )
-
-        # Mock the API response
-        channel = ChannelStub(responses=[expected_response])
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = bigtable_admin_v2.BigtableTableAdminClient()
-
-        # Setup Request
-        parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]")
-
-        paged_list_response = client.list_backups(parent)
-        resources = list(paged_list_response)
-        assert len(resources) == 1
-
-        assert expected_response.backups[0] == resources[0]
-
-        assert len(channel.requests) == 1
-        expected_request = bigtable_table_admin_pb2.ListBackupsRequest(parent=parent)
-        actual_request = channel.requests[0][1]
-        assert expected_request == actual_request
-
-    def test_list_backups_exception(self):
-        channel = ChannelStub(responses=[CustomException()])
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = bigtable_admin_v2.BigtableTableAdminClient()
-
-        # Setup request
-        parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]")
-
-        paged_list_response = client.list_backups(parent)
-        with pytest.raises(CustomException):
-            list(paged_list_response)
-
-    def test_update_backup(self):
-        # Setup Expected Response
-        name = "name3373707"
-        source_table = "sourceTable1670858410"
-        size_bytes = 1796325715
-        expected_response = {
-            "name": name,
-            "source_table": source_table,
-            "size_bytes": size_bytes,
-        }
-        expected_response = table_pb2.Backup(**expected_response)
-
-        # Mock the API response
-        channel = ChannelStub(responses=[expected_response])
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = bigtable_admin_v2.BigtableTableAdminClient()
-
-        # Setup Request
-        backup = {}
-        update_mask = {}
-
-        response = client.update_backup(backup, update_mask)
-        assert expected_response == response
-
-        assert len(channel.requests) == 1
-        expected_request = bigtable_table_admin_pb2.UpdateBackupRequest(
-            backup=backup, update_mask=update_mask
-        )
-        actual_request = channel.requests[0][1]
-        assert expected_request == actual_request
-
-    def test_update_backup_exception(self):
-        # Mock the API response
-        channel = ChannelStub(responses=[CustomException()])
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = bigtable_admin_v2.BigtableTableAdminClient()
-
-        # Setup request
-        backup = {}
-        update_mask = {}
-
-        with pytest.raises(CustomException):
-            client.update_backup(backup, update_mask)
-
-    def test_delete_backup(self):
-        channel = ChannelStub()
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = bigtable_admin_v2.BigtableTableAdminClient()
-
-        # Setup Request
-        name = client.backup_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[BACKUP]")
-
-        client.delete_backup(name)
-
-        assert len(channel.requests) == 1
-        expected_request = bigtable_table_admin_pb2.DeleteBackupRequest(name=name)
-        actual_request = channel.requests[0][1]
-        assert expected_request == actual_request
-
-    def test_delete_backup_exception(self):
-        # Mock the API response
-        channel = ChannelStub(responses=[CustomException()])
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = bigtable_admin_v2.BigtableTableAdminClient()
-
-        # Setup request
-        name = client.backup_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[BACKUP]")
-
-        with pytest.raises(CustomException):
-            client.delete_backup(name)
-
-    def test_restore_table(self):
-        # Setup Expected Response
-        name = "name3373707"
-        expected_response = {"name": name}
-        expected_response = table_pb2.Table(**expected_response)
-        operation = operations_pb2.Operation(
-            name="operations/test_restore_table", done=True
-        )
-        operation.response.Pack(expected_response)
-
-        # Mock the API response
-        channel = ChannelStub(responses=[operation])
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = bigtable_admin_v2.BigtableTableAdminClient()
-
-        response = client.restore_table()
-        result = response.result()
-        assert expected_response == result
-
-        assert len(channel.requests) == 1
-        expected_request = bigtable_table_admin_pb2.RestoreTableRequest()
-        actual_request = channel.requests[0][1]
-        assert expected_request == actual_request
-
-    def test_restore_table_exception(self):
-        # Setup Response
-        error = status_pb2.Status()
-        operation = operations_pb2.Operation(
-            name="operations/test_restore_table_exception", done=True
-        )
-        operation.error.CopyFrom(error)
-
-        # Mock the API response
-        channel = ChannelStub(responses=[operation])
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = bigtable_admin_v2.BigtableTableAdminClient()
-
-        response = client.restore_table()
-        exception = response.exception()
-        assert exception.errors[0] == error
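The replacement suites generated under tests/unit/gapic/ (test_bigtable_instance_admin.py, test_bigtable_table_admin.py, and test_bigtable.py in the diffstat) exercise the microgenerator surface instead, where each RPC takes a single request argument rather than flattened positional parameters. A hedged sketch of that calling convention, using a mock stand-in and assumed resource names rather than the real generated client:

from unittest import mock

# Stand-in for the generated BigtableTableAdminClient; the regenerated tests
# autospec the real class, which additionally enforces its exact signatures.
client = mock.Mock(name="BigtableTableAdminClient")

parent = "projects/my-project/instances/my-instance"  # assumed resource path

# Old hand-written GAPIC surface (deleted above):
#     client.create_table(parent, table_id, table)
# New microgenerator surface: one proto-shaped "request" mapping instead.
client.create_table(request={"parent": parent, "table_id": "my-table", "table": {}})

# Assertions then compare the whole request payload in one shot.
client.create_table.assert_called_once_with(
    request={"parent": parent, "table_id": "my-table", "table": {}}
)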
diff --git a/tests/unit/test_app_profile.py b/tests/unit/test_app_profile.py
index f7ec0a855..d0a08c5e1 100644
--- a/tests/unit/test_app_profile.py
+++ b/tests/unit/test_app_profile.py
@@ -166,7 +166,7 @@ def test___ne__(self):
         self.assertTrue(app_profile1 != app_profile2)

     def test_from_pb_success_routing_any(self):
-        from google.cloud.bigtable_admin_v2.types import instance_pb2 as data_v2_pb2
+        from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
         from google.cloud.bigtable.enums import RoutingPolicyType

         client = _Client(self.PROJECT)
@@ -195,7 +195,7 @@ def test_from_pb_success_routing_any(self):
         self.assertEqual(app_profile.allow_transactional_writes, False)

     def test_from_pb_success_routing_single(self):
-        from google.cloud.bigtable_admin_v2.types import instance_pb2 as data_v2_pb2
+        from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
         from google.cloud.bigtable.enums import RoutingPolicyType

         client = _Client(self.PROJECT)
@@ -228,7 +228,7 @@ def test_from_pb_success_routing_single(self):
         )

     def test_from_pb_bad_app_profile_name(self):
-        from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2
+        from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2

         bad_app_profile_name = "BAD_NAME"

@@ -239,7 +239,7 @@ def test_from_pb_bad_app_profile_name(self):
         klass.from_pb(app_profile_pb, None)

     def test_from_pb_instance_id_mistmatch(self):
-        from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2
+        from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2

         ALT_INSTANCE_ID = "ALT_INSTANCE_ID"
         client = _Client(self.PROJECT)
@@ -253,7 +253,7 @@ def test_from_pb_instance_id_mistmatch(self):
         klass.from_pb(app_profile_pb, instance)

     def test_from_pb_project_mistmatch(self):
-        from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2
+        from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2

         ALT_PROJECT = "ALT_PROJECT"
         client = _Client(project=ALT_PROJECT)
@@ -267,11 +267,13 @@ def test_from_pb_project_mistmatch(self):
         klass.from_pb(app_profile_pb, instance)

     def test_reload_routing_any(self):
-        from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client
-        from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2
+        from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
+            BigtableInstanceAdminClient,
+        )
+        from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
         from google.cloud.bigtable.enums import RoutingPolicyType

-        api = bigtable_instance_admin_client.BigtableInstanceAdminClient(mock.Mock())
+        api = mock.create_autospec(BigtableInstanceAdminClient)
         credentials = _make_credentials()
         client = self._make_client(
             project=self.PROJECT, credentials=credentials, admin=True
@@ -305,7 +307,7 @@
         # Patch the stub used by the API method.
         client._instance_admin_client = api

-        instance_stub = client._instance_admin_client.transport
+        instance_stub = client._instance_admin_client
         instance_stub.get_app_profile.side_effect = [response_pb]

         # Create expected_result.
@@ -328,13 +330,13 @@
         )

     def test_exists(self):
-        from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client
-        from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2
+        from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
+            BigtableInstanceAdminClient,
+        )
+        from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2
         from google.api_core import exceptions

-        instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient(
-            mock.Mock()
-        )
+        instance_api = mock.create_autospec(BigtableInstanceAdminClient)
         credentials = _make_credentials()
         client = self._make_client(
             project=self.PROJECT, credentials=credentials, admin=True
@@ -347,7 +349,7 @@ def test_exists(self):
         # Patch the stub used by the API method.
         client._instance_admin_client = instance_api
-        instance_stub = client._instance_admin_client.transport
+        instance_stub = client._instance_admin_client
         instance_stub.get_app_profile.side_effect = [
             response_pb,
             exceptions.NotFound("testing"),
@@ -364,11 +366,10 @@ def test_exists(self):
             alt_app_profile.exists()

     def test_create_routing_any(self):
-        from google.cloud.bigtable_admin_v2.proto import (
-            bigtable_instance_admin_pb2 as messages_v2_pb2,
-        )
         from google.cloud.bigtable.enums import RoutingPolicyType
-        from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client
+        from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
+            BigtableInstanceAdminClient,
+        )

         credentials = _make_credentials()
         client = self._make_client(
@@ -386,23 +387,34 @@ def test_create_routing_any(self):
             routing_policy_type=routing,
             description=description,
         )
+
         expected_request_app_profile = app_profile._to_pb()
-        expected_request = messages_v2_pb2.CreateAppProfileRequest(
-            parent=instance.name,
-            app_profile_id=self.APP_PROFILE_ID,
-            app_profile=expected_request_app_profile,
-            ignore_warnings=ignore_warnings,
-        )
+        name = instance.name
+        expected_request = {
+            "request": {
+                "parent": name,
+                "app_profile_id": self.APP_PROFILE_ID,
+                "app_profile": expected_request_app_profile,
+                "ignore_warnings": ignore_warnings,
+            }
+        }
+
+        instance_api = mock.create_autospec(BigtableInstanceAdminClient)
+        instance_api.app_profile_path.return_value = (
+            "projects/project/instances/instance-id/appProfiles/app-profile-id"
+        )
+        instance_api.instance_path.return_value = name
+        instance_api.create_app_profile.return_value = expected_request_app_profile

         # Patch the stub used by the API method.
-        channel = ChannelStub(responses=[expected_request_app_profile])
-        instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient(
-            channel=channel
-        )
         client._instance_admin_client = instance_api
+        app_profile._instance._client._instance_admin_client = instance_api

         # Perform the method and check the result.
         result = app_profile.create(ignore_warnings)
-        actual_request = channel.requests[0][1]
+
+        actual_request = client._instance_admin_client.create_app_profile.call_args_list[
+            0
+        ].kwargs
         self.assertEqual(actual_request, expected_request)

         self.assertIsInstance(result, self._get_target_class())
@@ -414,11 +426,10 @@ def test_create_routing_any(self):
         self.assertIsNone(result.cluster_id)

     def test_create_routing_single(self):
-        from google.cloud.bigtable_admin_v2.proto import (
-            bigtable_instance_admin_pb2 as messages_v2_pb2,
-        )
         from google.cloud.bigtable.enums import RoutingPolicyType
-        from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client
+        from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
+            BigtableInstanceAdminClient,
+        )

         credentials = _make_credentials()
         client = self._make_client(
@@ -440,22 +451,30 @@ def test_create_routing_single(self):
             allow_transactional_writes=allow_writes,
         )
         expected_request_app_profile = app_profile._to_pb()
-        expected_request = messages_v2_pb2.CreateAppProfileRequest(
-            parent=instance.name,
-            app_profile_id=self.APP_PROFILE_ID,
-            app_profile=expected_request_app_profile,
-            ignore_warnings=ignore_warnings,
-        )
+        instance_name = instance.name
+        expected_request = {
+            "request": {
+                "parent": instance_name,
+                "app_profile_id": self.APP_PROFILE_ID,
+                "app_profile": expected_request_app_profile,
+                "ignore_warnings": ignore_warnings,
+            }
+        }

         # Patch the stub used by the API method.
-        channel = ChannelStub(responses=[expected_request_app_profile])
-        instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient(
-            channel=channel
+        instance_api = mock.create_autospec(BigtableInstanceAdminClient)
+        instance_api.app_profile_path.return_value = (
+            "projects/project/instances/instance-id/appProfiles/app-profile-id"
         )
+        instance_api.instance_path.return_value = instance_name
+        instance_api.create_app_profile.return_value = expected_request_app_profile
         client._instance_admin_client = instance_api

         # Perform the method and check the result.
         result = app_profile.create(ignore_warnings)
-        actual_request = channel.requests[0][1]
+
+        actual_request = client._instance_admin_client.create_app_profile.call_args_list[
+            0
+        ].kwargs
         self.assertEqual(actual_request, expected_request)

         self.assertIsInstance(result, self._get_target_class())
@@ -479,14 +498,15 @@ def test_create_app_profile_with_wrong_routing_policy(self):
             app_profile.create()

     def test_update_app_profile_routing_any(self):
-        from google.api_core import operation
         from google.longrunning import operations_pb2
         from google.protobuf.any_pb2 import Any
-        from google.cloud.bigtable_admin_v2.proto import (
-            bigtable_instance_admin_pb2 as messages_v2_pb2,
+        from google.cloud.bigtable_admin_v2.types import (
+            bigtable_instance_admin as messages_v2_pb2,
         )
         from google.cloud.bigtable.enums import RoutingPolicyType
-        from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client
+        from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
+            BigtableInstanceAdminClient,
+        )
         from google.protobuf import field_mask_pb2

         credentials = _make_credentials()
@@ -510,19 +530,20 @@ def test_update_app_profile_routing_any(self):
         # Create response_pb
         metadata = messages_v2_pb2.UpdateAppProfileMetadata()
         type_url = "type.googleapis.com/{}".format(
-            messages_v2_pb2.UpdateAppProfileMetadata.DESCRIPTOR.full_name
+            messages_v2_pb2.UpdateAppProfileMetadata._meta._pb.DESCRIPTOR.full_name
         )
         response_pb = operations_pb2.Operation(
             name=self.OP_NAME,
-            metadata=Any(type_url=type_url, value=metadata.SerializeToString()),
+            metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()),
         )

         # Patch the stub used by the API method.
-        channel = ChannelStub(responses=[response_pb])
-        instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient(
-            channel=channel
-        )
+        instance_api = mock.create_autospec(BigtableInstanceAdminClient)  # Mock api calls
+        instance_api.app_profile_path.return_value = (
+            "projects/project/instances/instance-id/appProfiles/app-profile-id"
+        )
+
         client._instance_admin_client = instance_api

         # Perform the method and check the result.
@@ -530,29 +551,38 @@ def test_update_app_profile_routing_any(self):
         expected_request_update_mask = field_mask_pb2.FieldMask(
             paths=["description", "single_cluster_routing"]
         )
-        expected_request = messages_v2_pb2.UpdateAppProfileRequest(
-            app_profile=app_profile._to_pb(),
-            update_mask=expected_request_update_mask,
-            ignore_warnings=ignore_warnings,
-        )
+        expected_request = {
+            "request": {
+                "app_profile": app_profile._to_pb(),
+                "update_mask": expected_request_update_mask,
+                "ignore_warnings": ignore_warnings,
+            }
+        }
+
+        instance_api.update_app_profile.return_value = response_pb
+        app_profile._instance._client._instance_admin_client = instance_api

         result = app_profile.update(ignore_warnings=ignore_warnings)
-        actual_request = channel.requests[0][1]
+        actual_request = client._instance_admin_client.update_app_profile.call_args_list[
+            0
+        ].kwargs
         self.assertEqual(actual_request, expected_request)
-        self.assertIsInstance(result, operation.Operation)
-        self.assertEqual(result.operation.name, self.OP_NAME)
-        self.assertIsInstance(result.metadata, messages_v2_pb2.UpdateAppProfileMetadata)
+        self.assertEqual(
+            result.metadata.type_url,
+            "type.googleapis.com/google.bigtable.admin.v2.UpdateAppProfileMetadata",
+        )

     def test_update_app_profile_routing_single(self):
-        from google.api_core import operation
         from google.longrunning import operations_pb2
         from google.protobuf.any_pb2 import Any
-        from google.cloud.bigtable_admin_v2.proto import (
-            bigtable_instance_admin_pb2 as messages_v2_pb2,
+        from google.cloud.bigtable_admin_v2.types import (
+            bigtable_instance_admin as messages_v2_pb2,
         )
         from google.cloud.bigtable.enums import RoutingPolicyType
-        from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client
+        from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
+            BigtableInstanceAdminClient,
+        )
         from google.protobuf import field_mask_pb2

         credentials = _make_credentials()
@@ -569,39 +599,43 @@ def test_update_app_profile_routing_single(self):
         # Create response_pb
         metadata = messages_v2_pb2.UpdateAppProfileMetadata()
         type_url = "type.googleapis.com/{}".format(
-            messages_v2_pb2.UpdateAppProfileMetadata.DESCRIPTOR.full_name
+            messages_v2_pb2.UpdateAppProfileMetadata._meta._pb.DESCRIPTOR.full_name
        )
         response_pb = operations_pb2.Operation(
             name=self.OP_NAME,
-            metadata=Any(type_url=type_url, value=metadata.SerializeToString()),
+            metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()),
         )

         # Patch the stub used by the API method.
-        channel = ChannelStub(responses=[response_pb])
-        instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient(
-            channel=channel
-        )
+        instance_api = mock.create_autospec(BigtableInstanceAdminClient)  # Mock api calls
+        instance_api.app_profile_path.return_value = (
+            "projects/project/instances/instance-id/appProfiles/app-profile-id"
+        )
         client._instance_admin_client = instance_api
-
+        client._instance_admin_client.update_app_profile.return_value = response_pb
         # Perform the method and check the result.
         ignore_warnings = True
         expected_request_update_mask = field_mask_pb2.FieldMask(
             paths=["multi_cluster_routing_use_any"]
         )
-        expected_request = messages_v2_pb2.UpdateAppProfileRequest(
-            app_profile=app_profile._to_pb(),
-            update_mask=expected_request_update_mask,
-            ignore_warnings=ignore_warnings,
-        )
+        expected_request = {
+            "request": {
+                "app_profile": app_profile._to_pb(),
+                "update_mask": expected_request_update_mask,
+                "ignore_warnings": ignore_warnings,
+            }
+        }

         result = app_profile.update(ignore_warnings=ignore_warnings)
-        actual_request = channel.requests[0][1]
-
+        actual_request = client._instance_admin_client.update_app_profile.call_args_list[
+            0
+        ].kwargs
         self.assertEqual(actual_request, expected_request)
-        self.assertIsInstance(result, operation.Operation)
-        self.assertEqual(result.operation.name, self.OP_NAME)
-        self.assertIsInstance(result.metadata, messages_v2_pb2.UpdateAppProfileMetadata)
+        self.assertEqual(
+            result.metadata.type_url,
+            "type.googleapis.com/google.bigtable.admin.v2.UpdateAppProfileMetadata",
+        )

     def test_update_app_profile_with_wrong_routing_policy(self):
         credentials = _make_credentials()
@@ -617,12 +651,12 @@ def test_update_app_profile_with_wrong_routing_policy(self):

     def test_delete(self):
         from google.protobuf import empty_pb2
-        from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client
-
-        instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient(
-            mock.Mock()
+        from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
+            BigtableInstanceAdminClient,
         )
+        instance_api = mock.create_autospec(BigtableInstanceAdminClient)
+
         credentials = _make_credentials()
         client = self._make_client(
             project=self.PROJECT, credentials=credentials, admin=True
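The rewritten hunks above replace the ChannelStub plumbing with mock.create_autospec, which builds a mock constrained to the real client's method signatures, and then read the recorded call back through call_args_list[...].kwargs (available on Python 3.8+). A small self-contained illustration of that pattern, with FakeAdminClient as a hypothetical stand-in for BigtableInstanceAdminClient:

from unittest import mock


class FakeAdminClient:
    """Hypothetical stand-in for the generated admin client."""

    def create_app_profile(self, request=None):
        raise NotImplementedError  # never runs; the autospec replaces it


api = mock.create_autospec(FakeAdminClient)  # mock constrained to the real API
api.create_app_profile.return_value = "app-profile-pb"

result = api.create_app_profile(request={"parent": "in", "app_profile_id": "ap"})
assert result == "app-profile-pb"

# Reading the recorded kwargs back, as the updated tests do via
# create_app_profile.call_args_list[0].kwargs:
assert api.create_app_profile.call_args_list[0].kwargs == {
    "request": {"parent": "in", "app_profile_id": "ap"}
}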
diff --git a/tests/unit/test_backup.py b/tests/unit/test_backup.py
index 0285d668b..68e5f6162 100644
--- a/tests/unit/test_backup.py
+++ b/tests/unit/test_backup.py
@@ -93,35 +93,35 @@ def test_constructor_non_defaults(self):
         self.assertIsNone(backup._state)

     def test_from_pb_project_mismatch(self):
-        from google.cloud.bigtable_admin_v2.proto import table_pb2
+        from google.cloud.bigtable_admin_v2.types import table

         alt_project_id = "alt-project-id"
         client = _Client(project=alt_project_id)
         instance = _Instance(self.INSTANCE_NAME, client)
-        backup_pb = table_pb2.Backup(name=self.BACKUP_NAME)
+        backup_pb = table.Backup(name=self.BACKUP_NAME)
         klasse = self._get_target_class()

         with self.assertRaises(ValueError):
             klasse.from_pb(backup_pb, instance)

     def test_from_pb_instance_mismatch(self):
-        from google.cloud.bigtable_admin_v2.proto import table_pb2
+        from google.cloud.bigtable_admin_v2.types import table

         alt_instance = "/projects/%s/instances/alt-instance" % self.PROJECT_ID
         client = _Client()
         instance = _Instance(alt_instance, client)
-        backup_pb = table_pb2.Backup(name=self.BACKUP_NAME)
+        backup_pb = table.Backup(name=self.BACKUP_NAME)
         klasse = self._get_target_class()

         with self.assertRaises(ValueError):
             klasse.from_pb(backup_pb, instance)

     def test_from_pb_bad_name(self):
-        from google.cloud.bigtable_admin_v2.proto import table_pb2
+        from google.cloud.bigtable_admin_v2.types import table

         client = _Client()
         instance = _Instance(self.INSTANCE_NAME, client)
-        backup_pb = table_pb2.Backup(name="invalid_name")
+        backup_pb = table.Backup(name="invalid_name")

         klasse = self._get_target_class()
         with self.assertRaises(ValueError):
@@ -129,7 +129,7 @@

     def test_from_pb_success(self):
         from google.cloud.bigtable_admin_v2.gapic import enums
-        from google.cloud.bigtable_admin_v2.proto import table_pb2
+        from google.cloud.bigtable_admin_v2.types import table
         from google.cloud._helpers import _datetime_to_pb_timestamp

         client = _Client()
@@ -137,7 +137,7 @@ def test_from_pb_success(self):
         timestamp = _datetime_to_pb_timestamp(self._make_timestamp())
         size_bytes = 1234
         state = enums.Backup.State.READY
-        backup_pb = table_pb2.Backup(
+        backup_pb = table.Backup(
             name=self.BACKUP_NAME,
             source_table=self.TABLE_NAME,
             expire_time=timestamp,
@@ -156,16 +156,18 @@ def test_from_pb_success(self):
         self.assertEqual(backup.cluster, self.CLUSTER_ID)
         self.assertEqual(backup.table_id, self.TABLE_ID)
         self.assertEqual(backup._expire_time, timestamp)
-        self.assertEqual(backup._start_time, timestamp)
-        self.assertEqual(backup._end_time, timestamp)
+        self.assertEqual(backup.start_time, timestamp)
+        self.assertEqual(backup.end_time, timestamp)
         self.assertEqual(backup._size_bytes, size_bytes)
         self.assertEqual(backup._state, state)

     def test_property_name(self):
         from google.cloud.bigtable.client import Client
-        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
+        from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
+            BigtableInstanceAdminClient,
+        )

-        api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
+        api = mock.create_autospec(BigtableInstanceAdminClient)
         credentials = _make_credentials()
         client = Client(project=self.PROJECT_ID, credentials=credentials, admin=True)
         client._table_admin_client = api
@@ -186,17 +188,16 @@ def test_property_cluster_setter(self):
         self.assertEqual(backup.cluster, self.CLUSTER_ID)

     def test_property_parent_none(self):
-        backup = self._make_one(
-            self.BACKUP_ID,
-            _Instance(self.INSTANCE_NAME),
-        )
+        backup = self._make_one(self.BACKUP_ID, _Instance(self.INSTANCE_NAME),)
         self.assertIsNone(backup.parent)

     def test_property_parent_w_cluster(self):
         from google.cloud.bigtable.client import Client
-        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
+        from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
+            BigtableInstanceAdminClient,
+        )

-        api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
+        api = mock.create_autospec(BigtableInstanceAdminClient)
         credentials = _make_credentials()
         client = Client(project=self.PROJECT_ID, credentials=credentials, admin=True)
         client._table_admin_client = api
@@ -208,9 +209,11 @@ def test_property_source_table_none(self):
         from google.cloud.bigtable.client import Client
-        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
+        from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
+            BigtableInstanceAdminClient,
+        )

-        api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
+        api = mock.create_autospec(BigtableInstanceAdminClient)
         credentials = _make_credentials()
         client = Client(project=self.PROJECT_ID, credentials=credentials, admin=True)
         client._table_admin_client = api
@@ -221,9 +224,11 @@ def test_property_source_table_valid(self):
         from google.cloud.bigtable.client import Client
-        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
+        from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
+            BigtableInstanceAdminClient,
+        )

-        api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
+        api = mock.create_autospec(BigtableInstanceAdminClient)
         credentials = _make_credentials()
         client = Client(project=self.PROJECT_ID, credentials=credentials, admin=True)
         client._table_admin_client = api
@@ -298,10 +303,10 @@ def test_create_grpc_error(self):
         from google.api_core.exceptions import GoogleAPICallError
         from google.api_core.exceptions import Unknown
         from google.cloud._helpers import _datetime_to_pb_timestamp
-        from google.cloud.bigtable_admin_v2.types import table_pb2
+        from google.cloud.bigtable_admin_v2.types import table

         client = _Client()
-        api = client.table_admin_client = self._make_table_admin_client()
+        api = client._table_admin_client = self._make_table_admin_client()
         api.create_backup.side_effect = Unknown("testing")

         timestamp = self._make_timestamp()
@@ -312,7 +317,7 @@
             expire_time=timestamp,
         )

-        backup_pb = table_pb2.Backup(
+        backup_pb = table.Backup(
             source_table=self.TABLE_NAME,
             expire_time=_datetime_to_pb_timestamp(timestamp),
         )
@@ -321,18 +326,20 @@
             backup.create(self.CLUSTER_ID)

         api.create_backup.assert_called_once_with(
-            parent=self.CLUSTER_NAME,
-            backup_id=self.BACKUP_ID,
-            backup=backup_pb,
+            request={
+                "parent": self.CLUSTER_NAME,
+                "backup_id": self.BACKUP_ID,
+                "backup": backup_pb,
+            }
         )

     def test_create_already_exists(self):
         from google.cloud._helpers import _datetime_to_pb_timestamp
-        from google.cloud.bigtable_admin_v2.types import table_pb2
+        from google.cloud.bigtable_admin_v2.types import table
         from google.cloud.exceptions import Conflict

         client = _Client()
-        api = client.table_admin_client = self._make_table_admin_client()
+        api = client._table_admin_client = self._make_table_admin_client()
         api.create_backup.side_effect = Conflict("testing")

         timestamp = self._make_timestamp()
@@ -343,7 +350,7 @@
             expire_time=timestamp,
         )

-        backup_pb = table_pb2.Backup(
+        backup_pb = table.Backup(
             source_table=self.TABLE_NAME,
             expire_time=_datetime_to_pb_timestamp(timestamp),
         )
@@ -352,18 +359,20 @@
             backup.create(self.CLUSTER_ID)

         api.create_backup.assert_called_once_with(
-            parent=self.CLUSTER_NAME,
-            backup_id=self.BACKUP_ID,
-            backup=backup_pb,
+            request={
+                "parent": self.CLUSTER_NAME,
+                "backup_id": self.BACKUP_ID,
+                "backup": backup_pb,
+            }
         )

     def test_create_instance_not_found(self):
         from google.cloud._helpers import _datetime_to_pb_timestamp
-        from google.cloud.bigtable_admin_v2.types import table_pb2
+        from google.cloud.bigtable_admin_v2.types import table
         from google.cloud.exceptions import NotFound

         client = _Client()
-        api = client.table_admin_client = self._make_table_admin_client()
+        api = client._table_admin_client = self._make_table_admin_client()
         api.create_backup.side_effect = NotFound("testing")

         timestamp = self._make_timestamp()
@@ -374,7 +383,7 @@
             expire_time=timestamp,
         )

-        backup_pb = table_pb2.Backup(
+        backup_pb = table.Backup(
             source_table=self.TABLE_NAME,
             expire_time=_datetime_to_pb_timestamp(timestamp),
         )
@@ -383,9 +392,11 @@
             backup.create(self.CLUSTER_ID)

         api.create_backup.assert_called_once_with(
-            parent=self.CLUSTER_NAME,
-            backup_id=self.BACKUP_ID,
-            backup=backup_pb,
+            request={
+                "parent": self.CLUSTER_NAME,
+                "backup_id": self.BACKUP_ID,
+                "backup": backup_pb,
+            }
         )

     def test_create_cluster_not_set(self):
@@ -411,9 +422,7 @@ def test_create_table_not_set(self):

     def test_create_expire_time_not_set(self):
         backup = self._make_one(
-            self.BACKUP_ID,
-            _Instance(self.INSTANCE_NAME),
table_id=self.TABLE_ID, + self.BACKUP_ID, _Instance(self.INSTANCE_NAME), table_id=self.TABLE_ID, ) with self.assertRaises(ValueError): @@ -421,11 +430,13 @@ def test_create_success(self): from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable_admin_v2.types import table_pb2 + from google.cloud.bigtable_admin_v2.types import table + from google.cloud.bigtable import Client op_future = object() - client = _Client() - api = client.table_admin_client = self._make_table_admin_client() + credentials = _make_credentials() + client = Client(project=self.PROJECT_ID, credentials=credentials, admin=True) + api = client._table_admin_client = self._make_table_admin_client() api.create_backup.return_value = op_future timestamp = self._make_timestamp() @@ -436,7 +447,7 @@ def test_create_success(self): expire_time=timestamp, ) - backup_pb = table_pb2.Backup( + backup_pb = table.Backup( source_table=self.TABLE_NAME, expire_time=_datetime_to_pb_timestamp(timestamp), ) @@ -446,16 +457,18 @@ def test_create_success(self): self.assertIs(future, op_future) api.create_backup.assert_called_once_with( - parent=self.CLUSTER_NAME, - backup_id=self.BACKUP_ID, - backup=backup_pb, + request={ + "parent": self.CLUSTER_NAME, + "backup_id": self.BACKUP_ID, + "backup": backup_pb, + } ) def test_exists_grpc_error(self): from google.api_core.exceptions import Unknown client = _Client() - api = client.table_admin_client = self._make_table_admin_client() + api = client._table_admin_client = self._make_table_admin_client() api.get_backup.side_effect = Unknown("testing") instance = _Instance(self.INSTANCE_NAME, client=client) @@ -463,14 +476,13 @@ def test_exists_grpc_error(self): with self.assertRaises(Unknown): backup.exists() - - api.get_backup.assert_called_once_with(self.BACKUP_NAME) + api.get_backup.assert_called_once_with(request={"name": self.BACKUP_NAME}) def test_exists_not_found(self): from google.api_core.exceptions import NotFound client = _Client() - api = client.table_admin_client = self._make_table_admin_client() + api = client._table_admin_client = self._make_table_admin_client() api.get_backup.side_effect = NotFound("testing") instance = _Instance(self.INSTANCE_NAME, client=client) @@ -478,18 +490,18 @@ def test_exists_not_found(self): self.assertFalse(backup.exists()) - api.get_backup.assert_called_once_with(self.BACKUP_NAME) + api.get_backup.assert_called_once_with(request={"name": self.BACKUP_NAME}) def test_get(self): from google.cloud.bigtable_admin_v2.gapic import enums - from google.cloud.bigtable_admin_v2.proto import table_pb2 + from google.cloud.bigtable_admin_v2.types import table from google.cloud._helpers import _datetime_to_pb_timestamp timestamp = _datetime_to_pb_timestamp(self._make_timestamp()) state = enums.Backup.State.READY client = _Client() - backup_pb = table_pb2.Backup( + backup_pb = table.Backup( name=self.BACKUP_NAME, source_table=self.TABLE_NAME, expire_time=timestamp, @@ -498,7 +510,7 @@ def test_get(self): size_bytes=0, state=state, ) - api = client.table_admin_client = self._make_table_admin_client() + api = client._table_admin_client = self._make_table_admin_client() api.get_backup.return_value = backup_pb instance = _Instance(self.INSTANCE_NAME, client=client) @@ -508,14 +520,14 @@ def test_reload(self): from google.cloud.bigtable_admin_v2.gapic import enums - from google.cloud.bigtable_admin_v2.proto import table_pb2 + from google.cloud.bigtable_admin_v2.types import table from google.cloud._helpers import
_datetime_to_pb_timestamp timestamp = _datetime_to_pb_timestamp(self._make_timestamp()) state = enums.Backup.State.READY client = _Client() - backup_pb = table_pb2.Backup( + backup_pb = table.Backup( name=self.BACKUP_NAME, source_table=self.TABLE_NAME, expire_time=timestamp, @@ -524,7 +536,7 @@ def test_reload(self): size_bytes=0, state=state, ) - api = client.table_admin_client = self._make_table_admin_client() + api = client._table_admin_client = self._make_table_admin_client() api.get_backup.return_value = backup_pb instance = _Instance(self.INSTANCE_NAME, client=client) @@ -539,11 +551,11 @@ def test_reload(self): self.assertEqual(backup._state, state) def test_exists_success(self): - from google.cloud.bigtable_admin_v2.proto import table_pb2 + from google.cloud.bigtable_admin_v2.types import table client = _Client() - backup_pb = table_pb2.Backup(name=self.BACKUP_NAME) - api = client.table_admin_client = self._make_table_admin_client() + backup_pb = table.Backup(name=self.BACKUP_NAME) + api = client._table_admin_client = self._make_table_admin_client() api.get_backup.return_value = backup_pb instance = _Instance(self.INSTANCE_NAME, client=client) @@ -551,13 +563,13 @@ def test_exists_success(self): self.assertTrue(backup.exists()) - api.get_backup.assert_called_once_with(self.BACKUP_NAME) + api.get_backup.assert_called_once_with(request={"name": self.BACKUP_NAME}) def test_delete_grpc_error(self): from google.api_core.exceptions import Unknown client = _Client() - api = client.table_admin_client = self._make_table_admin_client() + api = client._table_admin_client = self._make_table_admin_client() api.delete_backup.side_effect = Unknown("testing") instance = _Instance(self.INSTANCE_NAME, client=client) backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) @@ -565,13 +577,13 @@ def test_delete_grpc_error(self): with self.assertRaises(Unknown): backup.delete() - api.delete_backup.assert_called_once_with(self.BACKUP_NAME) + api.delete_backup.assert_called_once_with(request={"name": self.BACKUP_NAME}) def test_delete_not_found(self): from google.api_core.exceptions import NotFound client = _Client() - api = client.table_admin_client = self._make_table_admin_client() + api = client._table_admin_client = self._make_table_admin_client() api.delete_backup.side_effect = NotFound("testing") instance = _Instance(self.INSTANCE_NAME, client=client) backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) @@ -579,29 +591,29 @@ def test_delete_not_found(self): with self.assertRaises(NotFound): backup.delete() - api.delete_backup.assert_called_once_with(self.BACKUP_NAME) + api.delete_backup.assert_called_once_with(request={"name": self.BACKUP_NAME}) def test_delete_success(self): from google.protobuf.empty_pb2 import Empty client = _Client() - api = client.table_admin_client = self._make_table_admin_client() + api = client._table_admin_client = self._make_table_admin_client() api.delete_backup.return_value = Empty() instance = _Instance(self.INSTANCE_NAME, client=client) backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) backup.delete() - api.delete_backup.assert_called_once_with(self.BACKUP_NAME) + api.delete_backup.assert_called_once_with(request={"name": self.BACKUP_NAME}) def test_update_expire_time_grpc_error(self): from google.api_core.exceptions import Unknown from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable_admin_v2.types import table_pb2 + from google.cloud.bigtable_admin_v2.types 
import table from google.protobuf import field_mask_pb2 client = _Client() - api = client.table_admin_client = self._make_table_admin_client() + api = client._table_admin_client = self._make_table_admin_client() api.update_backup.side_effect = Unknown("testing") instance = _Instance(self.INSTANCE_NAME, client=client) backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) @@ -610,24 +622,22 @@ def test_update_expire_time_grpc_error(self): with self.assertRaises(Unknown): backup.update_expire_time(expire_time) - backup_update = table_pb2.Backup( - name=self.BACKUP_NAME, - expire_time=_datetime_to_pb_timestamp(expire_time), + backup_update = table.Backup( + name=self.BACKUP_NAME, expire_time=_datetime_to_pb_timestamp(expire_time), ) update_mask = field_mask_pb2.FieldMask(paths=["expire_time"]) api.update_backup.assert_called_once_with( - backup_update, - update_mask, + request={"backup": backup_update, "update_mask": update_mask} ) def test_update_expire_time_not_found(self): from google.api_core.exceptions import NotFound from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable_admin_v2.types import table_pb2 + from google.cloud.bigtable_admin_v2.types import table from google.protobuf import field_mask_pb2 client = _Client() - api = client.table_admin_client = self._make_table_admin_client() + api = client._table_admin_client = self._make_table_admin_client() api.update_backup.side_effect = NotFound("testing") instance = _Instance(self.INSTANCE_NAME, client=client) backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) @@ -636,38 +646,34 @@ def test_update_expire_time_not_found(self): with self.assertRaises(NotFound): backup.update_expire_time(expire_time) - backup_update = table_pb2.Backup( - name=self.BACKUP_NAME, - expire_time=_datetime_to_pb_timestamp(expire_time), + backup_update = table.Backup( + name=self.BACKUP_NAME, expire_time=_datetime_to_pb_timestamp(expire_time), ) update_mask = field_mask_pb2.FieldMask(paths=["expire_time"]) api.update_backup.assert_called_once_with( - backup_update, - update_mask, + request={"backup": backup_update, "update_mask": update_mask} ) def test_update_expire_time_success(self): from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable_admin_v2.proto import table_pb2 + from google.cloud.bigtable_admin_v2.types import table from google.protobuf import field_mask_pb2 client = _Client() - api = client.table_admin_client = self._make_table_admin_client() - api.update_backup.return_type = table_pb2.Backup(name=self.BACKUP_NAME) + api = client._table_admin_client = self._make_table_admin_client() + api.update_backup.return_value = table.Backup(name=self.BACKUP_NAME) instance = _Instance(self.INSTANCE_NAME, client=client) backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) expire_time = self._make_timestamp() backup.update_expire_time(expire_time) - backup_update = table_pb2.Backup( - name=self.BACKUP_NAME, - expire_time=_datetime_to_pb_timestamp(expire_time), + backup_update = table.Backup( + name=self.BACKUP_NAME, expire_time=_datetime_to_pb_timestamp(expire_time), ) update_mask = field_mask_pb2.FieldMask(paths=["expire_time"]) api.update_backup.assert_called_once_with( - backup_update, - update_mask, + request={"backup": backup_update, "update_mask": update_mask} ) def test_restore_grpc_error(self): @@ -675,7 +681,7 @@ def test_restore_grpc_error(self): from google.api_core.exceptions import Unknown client = _Client() -
api = client.table_admin_client = self._make_table_admin_client() + api = client._table_admin_client = self._make_table_admin_client() api.restore_table.side_effect = Unknown("testing") timestamp = self._make_timestamp() @@ -691,14 +697,16 @@ def test_restore_grpc_error(self): backup.restore(self.TABLE_ID) api.restore_table.assert_called_once_with( - parent=self.INSTANCE_NAME, - table_id=self.TABLE_ID, - backup=self.BACKUP_NAME, + request={ + "parent": self.INSTANCE_NAME, + "table_id": self.TABLE_ID, + "backup": self.BACKUP_NAME, + } ) def test_restore_cluster_not_set(self): client = _Client() - client.table_admin_client = self._make_table_admin_client() + client._table_admin_client = self._make_table_admin_client() backup = self._make_one( self.BACKUP_ID, _Instance(self.INSTANCE_NAME, client=client), @@ -712,7 +720,7 @@ def test_restore_cluster_not_set(self): def test_restore_success(self): op_future = object() client = _Client() - api = client.table_admin_client = self._make_table_admin_client() + api = client._table_admin_client = self._make_table_admin_client() api.restore_table.return_value = op_future timestamp = self._make_timestamp() @@ -729,14 +737,18 @@ def test_restore_success(self): self.assertIs(future, op_future) api.restore_table.assert_called_once_with( - parent=self.INSTANCE_NAME, - table_id=self.TABLE_ID, - backup=self.BACKUP_NAME, + request={ + "parent": self.INSTANCE_NAME, + "table_id": self.TABLE_ID, + "backup": self.BACKUP_NAME, + } ) def test_get_iam_policy(self): from google.cloud.bigtable.client import Client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + BigtableTableAdminClient, + ) from google.iam.v1 import policy_pb2 from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE @@ -752,15 +764,15 @@ def test_get_iam_policy(self): bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": members}] iam_policy = policy_pb2.Policy(version=version, etag=etag, bindings=bindings) - table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient - ) + table_api = mock.create_autospec(BigtableTableAdminClient) client._table_admin_client = table_api table_api.get_iam_policy.return_value = iam_policy result = backup.get_iam_policy() - table_api.get_iam_policy.assert_called_once_with(resource=backup.name) + table_api.get_iam_policy.assert_called_once_with( + request={"resource": backup.name} + ) self.assertEqual(result.version, version) self.assertEqual(result.etag, etag) @@ -771,7 +783,9 @@ def test_get_iam_policy(self): def test_set_iam_policy(self): from google.cloud.bigtable.client import Client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + BigtableTableAdminClient, + ) from google.iam.v1 import policy_pb2 from google.cloud.bigtable.policy import Policy from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE @@ -788,9 +802,7 @@ def test_set_iam_policy(self): bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": sorted(members)}] iam_policy_pb = policy_pb2.Policy(version=version, etag=etag, bindings=bindings) - table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient - ) + table_api = mock.create_autospec(BigtableTableAdminClient) client._table_admin_client = table_api table_api.set_iam_policy.return_value = iam_policy_pb @@ -803,7 +815,7 @@ def test_set_iam_policy(self): result = 
backup.set_iam_policy(iam_policy) table_api.set_iam_policy.assert_called_once_with( - resource=backup.name, policy=iam_policy_pb + request={"resource": backup.name, "policy": iam_policy_pb} ) self.assertEqual(result.version, version) self.assertEqual(result.etag, etag) @@ -815,7 +827,9 @@ def test_set_iam_policy(self): def test_test_iam_permissions(self): from google.cloud.bigtable.client import Client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + BigtableTableAdminClient, + ) from google.iam.v1 import iam_policy_pb2 credentials = _make_credentials() @@ -828,9 +842,7 @@ def test_test_iam_permissions(self): response = iam_policy_pb2.TestIamPermissionsResponse(permissions=permissions) - table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient - ) + table_api = mock.create_autospec(BigtableTableAdminClient) table_api.test_iam_permissions.return_value = response client._table_admin_client = table_api @@ -838,7 +850,7 @@ def test_test_iam_permissions(self): self.assertEqual(result, permissions) table_api.test_iam_permissions.assert_called_once_with( - resource=backup.name, permissions=permissions + request={"resource": backup.name, "permissions": permissions} ) diff --git a/tests/unit/test_client.py b/tests/unit/test_client.py index 21ec479d0..60a2cd738 100644 --- a/tests/unit/test_client.py +++ b/tests/unit/test_client.py @@ -223,7 +223,7 @@ def test_table_data_client_not_initialized(self): table_data_client = client.table_data_client self.assertIsInstance(table_data_client, BigtableClient) - self.assertIs(table_data_client._client_info, _CLIENT_INFO) + self.assertIs(client._client_info, _CLIENT_INFO) self.assertIs(client._table_data_client, table_data_client) def test_table_data_client_not_initialized_w_client_info(self): @@ -237,7 +237,7 @@ def test_table_data_client_not_initialized_w_client_info(self): table_data_client = client.table_data_client self.assertIsInstance(table_data_client, BigtableClient) - self.assertIs(table_data_client._client_info, client_info) + self.assertIs(client._client_info, client_info) self.assertIs(client._table_data_client, table_data_client) def test_table_data_client_not_initialized_w_client_options(self): @@ -292,7 +292,7 @@ def test_table_admin_client_not_initialized_w_admin_flag(self): table_admin_client = client.table_admin_client self.assertIsInstance(table_admin_client, BigtableTableAdminClient) - self.assertIs(table_admin_client._client_info, _CLIENT_INFO) + self.assertIs(client._client_info, _CLIENT_INFO) self.assertIs(client._table_admin_client, table_admin_client) def test_table_admin_client_not_initialized_w_client_info(self): @@ -309,7 +309,7 @@ def test_table_admin_client_not_initialized_w_client_info(self): table_admin_client = client.table_admin_client self.assertIsInstance(table_admin_client, BigtableTableAdminClient) - self.assertIs(table_admin_client._client_info, client_info) + self.assertIs(client._client_info, client_info) self.assertIs(client._table_admin_client, table_admin_client) def test_table_admin_client_not_initialized_w_client_options(self): @@ -363,7 +363,7 @@ def test_instance_admin_client_not_initialized_w_admin_flag(self): instance_admin_client = client.instance_admin_client self.assertIsInstance(instance_admin_client, BigtableInstanceAdminClient) - self.assertIs(instance_admin_client._client_info, _CLIENT_INFO) + self.assertIs(client._client_info, _CLIENT_INFO) 
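The test_backup.py hunks above all land on the same convention: a microgenerated client method takes a single request argument, a dict or proto-plus message keyed by field name, instead of the old flattened keyword arguments. A minimal sketch of the convention against an autospec'd admin client (the backup name is illustrative, not from this patch):

    import mock

    from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
        BigtableTableAdminClient,
    )

    api = mock.create_autospec(BigtableTableAdminClient)
    backup_name = "projects/p/instances/i/clusters/c/backups/b"  # illustrative

    # Old GAPIC surface: api.delete_backup(backup_name)
    # Microgenerator surface: one request dict, keyed by proto field name.
    api.delete_backup(request={"name": backup_name})
    api.delete_backup.assert_called_once_with(request={"name": backup_name})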
self.assertIs(client._instance_admin_client, instance_admin_client) def test_instance_admin_client_not_initialized_w_client_info(self): @@ -380,7 +380,7 @@ def test_instance_admin_client_not_initialized_w_client_info(self): instance_admin_client = client.instance_admin_client self.assertIsInstance(instance_admin_client, BigtableInstanceAdminClient) - self.assertIs(instance_admin_client._client_info, client_info) + self.assertIs(client._client_info, client_info) self.assertIs(client._instance_admin_client, instance_admin_client) def test_instance_admin_client_not_initialized_w_client_options(self): @@ -460,11 +460,13 @@ def test_instance_factory_non_defaults(self): self.assertIs(instance._client, client) def test_list_instances(self): - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as messages_v2_pb2, + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 + from google.cloud.bigtable_admin_v2.types import ( + bigtable_instance_admin as messages_v2_pb2, + ) + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, ) - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client from google.cloud.bigtable.instance import Instance FAILED_LOCATION = "FAILED" @@ -473,8 +475,9 @@ def test_list_instances(self): INSTANCE_NAME1 = "projects/" + self.PROJECT + "/instances/" + INSTANCE_ID1 INSTANCE_NAME2 = "projects/" + self.PROJECT + "/instances/" + INSTANCE_ID2 + api = mock.create_autospec(BigtableInstanceAdminClient) credentials = _make_credentials() - api = bigtable_instance_admin_client.BigtableInstanceAdminClient(mock.Mock()) + client = self._make_one( project=self.PROJECT, credentials=credentials, admin=True ) @@ -490,8 +493,9 @@ def test_list_instances(self): # Patch the stub used by the API method. client._instance_admin_client = api - bigtable_instance_stub = client.instance_admin_client.transport - bigtable_instance_stub.list_instances.side_effect = [response_pb] + instance_stub = client._instance_admin_client + + instance_stub.list_instances.side_effect = [response_pb] # Perform the method and check the result. 
instances, failed_locations = client.list_instances() @@ -499,26 +503,27 @@ def test_list_instances(self): instance_1, instance_2 = instances self.assertIsInstance(instance_1, Instance) - self.assertEqual(instance_1.name, INSTANCE_NAME1) + self.assertEqual(instance_1.instance_id, INSTANCE_ID1) self.assertTrue(instance_1._client is client) self.assertIsInstance(instance_2, Instance) - self.assertEqual(instance_2.name, INSTANCE_NAME2) + self.assertEqual(instance_2.instance_id, INSTANCE_ID2) self.assertTrue(instance_2._client is client) self.assertEqual(failed_locations, [FAILED_LOCATION]) def test_list_clusters(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as messages_v2_pb2, + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) + from google.cloud.bigtable_admin_v2.types import ( + bigtable_instance_admin as messages_v2_pb2, ) - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 from google.cloud.bigtable.instance import Cluster - instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient( - mock.Mock() - ) + instance_api = mock.create_autospec(BigtableInstanceAdminClient) + credentials = _make_credentials() client = self._make_one( project=self.PROJECT, credentials=credentials, admin=True @@ -553,7 +558,8 @@ def test_list_clusters(self): # Patch the stub used by the API method. client._instance_admin_client = instance_api - instance_stub = client._instance_admin_client.transport + instance_stub = client._instance_admin_client + instance_stub.list_clusters.side_effect = [response_pb] # Perform the method and check the result. 
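Note how the patching changed in the hunk above: the old tests stubbed the gRPC transport behind the client (client.instance_admin_client.transport), while the new ones replace the admin client wholesale with an autospec mock. A rough sketch of the new arrangement, assuming anonymous credentials are acceptable for a client that never issues a real RPC:

    import mock

    from google.auth.credentials import AnonymousCredentials
    from google.cloud.bigtable import Client
    from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
        BigtableInstanceAdminClient,
    )
    from google.cloud.bigtable_admin_v2.types import (
        bigtable_instance_admin as messages_v2_pb2,
    )

    api = mock.create_autospec(BigtableInstanceAdminClient)
    # Queue exactly one canned response; a second call would raise StopIteration.
    api.list_instances.side_effect = [
        messages_v2_pb2.ListInstancesResponse(failed_locations=["FAILED"])
    ]

    client = Client(project="project", credentials=AnonymousCredentials(), admin=True)
    client._instance_admin_client = api  # patch the private handle, as the tests do

    instances, failed_locations = client.list_instances()
    assert failed_locations == ["FAILED"]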
@@ -562,15 +568,15 @@ def test_list_clusters(self): cluster_1, cluster_2, cluster_3 = clusters self.assertIsInstance(cluster_1, Cluster) - self.assertEqual(cluster_1.name, cluster_name1) + self.assertEqual(cluster_1.cluster_id, cluster_id1) self.assertEqual(cluster_1._instance.instance_id, INSTANCE_ID1) self.assertIsInstance(cluster_2, Cluster) - self.assertEqual(cluster_2.name, cluster_name2) + self.assertEqual(cluster_2.cluster_id, cluster_id2) self.assertEqual(cluster_2._instance.instance_id, INSTANCE_ID2) self.assertIsInstance(cluster_3, Cluster) - self.assertEqual(cluster_3.name, cluster_name3) + self.assertEqual(cluster_3.cluster_id, cluster_id3) self.assertEqual(cluster_3._instance.instance_id, INSTANCE_ID2) self.assertEqual(failed_locations, [failed_location]) diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py index 9a0d39c84..d5f731eb6 100644 --- a/tests/unit/test_cluster.py +++ b/tests/unit/test_cluster.py @@ -126,7 +126,7 @@ def test_name_property(self): self.assertEqual(cluster.name, self.CLUSTER_NAME) def test_from_pb_success(self): - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 from google.cloud.bigtable import enums client = _Client(self.PROJECT) @@ -154,7 +154,7 @@ def test_from_pb_success(self): self.assertEqual(cluster.default_storage_type, storage_type) def test_from_pb_bad_cluster_name(self): - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 bad_cluster_name = "BAD_NAME" @@ -165,7 +165,7 @@ def test_from_pb_bad_cluster_name(self): klass.from_pb(cluster_pb, None) def test_from_pb_instance_id_mistmatch(self): - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 ALT_INSTANCE_ID = "ALT_INSTANCE_ID" client = _Client(self.PROJECT) @@ -179,7 +179,7 @@ def test_from_pb_instance_id_mistmatch(self): klass.from_pb(cluster_pb, instance) def test_from_pb_project_mistmatch(self): - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 ALT_PROJECT = "ALT_PROJECT" client = _Client(project=ALT_PROJECT) @@ -222,12 +222,15 @@ def test___ne__(self): self.assertNotEqual(cluster1, cluster2) def test_reload(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 from google.cloud.bigtable.enums import StorageType from google.cloud.bigtable.enums import Cluster - api = bigtable_instance_admin_client.BigtableInstanceAdminClient(mock.Mock()) + api = mock.create_autospec(BigtableInstanceAdminClient) + credentials = _make_credentials() client = self._make_client( project=self.PROJECT, credentials=credentials, admin=True @@ -258,8 +261,8 @@ def test_reload(self): # Patch the stub used by the API method. client._instance_admin_client = api - instance_admin_client = client._instance_admin_client - instance_stub = instance_admin_client.transport + instance_stub = client._instance_admin_client + instance_stub.get_cluster.side_effect = [response_pb] # Create expected_result. 
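The import rewrites in these hunks are mechanical: the generated *_pb2 modules under bigtable_admin_v2.proto are gone, and proto-plus message wrappers under bigtable_admin_v2.types take their place under the same aliases, so the message construction below each import is mostly untouched. A small before/after sketch (the cluster values are illustrative):

    # Before (module removed by this change):
    #     from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2
    # After:
    from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2

    cluster_pb = data_v2_pb2.Cluster(
        name="projects/p/instances/i/clusters/c",
        serve_nodes=3,
    )
    # Plain-field keyword arguments and attribute access carry over unchanged.
    assert cluster_pb.serve_nodes == 3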
@@ -280,14 +283,14 @@ def test_reload(self): self.assertEqual(cluster.default_storage_type, STORAGE_TYPE_FROM_SERVER) def test_exists(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 from google.cloud.bigtable.instance import Instance from google.api_core import exceptions - instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient( - mock.Mock() - ) + instance_api = mock.create_autospec(BigtableInstanceAdminClient) credentials = _make_credentials() client = self._make_client( project=self.PROJECT, credentials=credentials, admin=True @@ -302,9 +305,9 @@ def test_exists(self): # Patch the stub used by the API method. client._instance_admin_client = instance_api - instance_admin_client = client._instance_admin_client - instance_stub = instance_admin_client.transport - instance_stub.get_cluster.side_effect = [ + bigtable_instance_stub = client._instance_admin_client + + bigtable_instance_stub.get_cluster.side_effect = [ response_pb, exceptions.NotFound("testing"), exceptions.BadRequest("testing"), @@ -321,19 +324,17 @@ def test_exists(self): def test_create(self): import datetime - from google.api_core import operation from google.longrunning import operations_pb2 from google.protobuf.any_pb2 import Any - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as messages_v2_pb2, + from google.cloud.bigtable_admin_v2.types import ( + bigtable_instance_admin as messages_v2_pb2, ) from google.cloud._helpers import _datetime_to_pb_timestamp from google.cloud.bigtable.instance import Instance - from google.cloud.bigtable_admin_v2.types import instance_pb2 - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as instance_v2_pb2, + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, ) + from google.cloud.bigtable_admin_v2.types import instance as instance_v2_pb2 from google.cloud.bigtable.enums import StorageType NOW = datetime.datetime.utcnow() @@ -352,53 +353,56 @@ def test_create(self): serve_nodes=self.SERVE_NODES, default_storage_type=STORAGE_TYPE_SSD, ) - expected_request_cluster = instance_pb2.Cluster( + expected_request_cluster = instance_v2_pb2.Cluster( location=LOCATION, serve_nodes=cluster.serve_nodes, default_storage_type=cluster.default_storage_type, ) - expected_request = instance_v2_pb2.CreateClusterRequest( - parent=instance.name, - cluster_id=self.CLUSTER_ID, - cluster=expected_request_cluster, - ) - + expected_request = { + "request": { + "parent": instance.name, + "cluster_id": self.CLUSTER_ID, + "cluster": expected_request_cluster, + } + } + name = instance.name metadata = messages_v2_pb2.CreateClusterMetadata(request_time=NOW_PB) type_url = "type.googleapis.com/{}".format( - messages_v2_pb2.CreateClusterMetadata.DESCRIPTOR.full_name + messages_v2_pb2.CreateClusterMetadata._meta._pb.DESCRIPTOR.full_name ) response_pb = operations_pb2.Operation( name=self.OP_NAME, - metadata=Any(type_url=type_url, value=metadata.SerializeToString()), + metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()), ) # Patch the stub used by the API method. 
- channel = ChannelStub(responses=[response_pb]) - api = bigtable_instance_admin_client.BigtableInstanceAdminClient( - channel=channel - ) + api = mock.create_autospec(BigtableInstanceAdminClient) + api.common_location_path.return_value = LOCATION client._instance_admin_client = api - + cluster._instance._client = client + cluster._instance._client.instance_admin_client.instance_path.return_value = ( + name + ) + client._instance_admin_client.create_cluster.return_value = response_pb # Perform the method and check the result. - result = cluster.create() - actual_request = channel.requests[0][1] + cluster.create() + actual_request = client._instance_admin_client.create_cluster.call_args_list[ + 0 + ].kwargs self.assertEqual(actual_request, expected_request) - self.assertIsInstance(result, operation.Operation) - self.assertEqual(result.operation.name, self.OP_NAME) - self.assertIsInstance(result.metadata, messages_v2_pb2.CreateClusterMetadata) def test_update(self): import datetime - from google.api_core import operation from google.longrunning import operations_pb2 from google.protobuf.any_pb2 import Any from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as messages_v2_pb2, + from google.cloud.bigtable_admin_v2.types import ( + bigtable_instance_admin as messages_v2_pb2, + ) + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, ) - from google.cloud.bigtable_admin_v2.types import instance_pb2 - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client from google.cloud.bigtable.enums import StorageType NOW = datetime.datetime.utcnow() @@ -418,40 +422,45 @@ def test_update(self): default_storage_type=STORAGE_TYPE_SSD, ) # Create expected_request - expected_request = instance_pb2.Cluster( - name=cluster.name, serve_nodes=self.SERVE_NODES - ) - + expected_request = { + "request": { + "name": "projects/project/instances/instance-id/clusters/cluster-id", + "serve_nodes": 5, + "location": None, + } + } metadata = messages_v2_pb2.UpdateClusterMetadata(request_time=NOW_PB) type_url = "type.googleapis.com/{}".format( - messages_v2_pb2.UpdateClusterMetadata.DESCRIPTOR.full_name + messages_v2_pb2.UpdateClusterMetadata._meta._pb.DESCRIPTOR.full_name ) response_pb = operations_pb2.Operation( name=self.OP_NAME, - metadata=Any(type_url=type_url, value=metadata.SerializeToString()), + metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()), ) # Patch the stub used by the API method. - channel = ChannelStub(responses=[response_pb]) - api = bigtable_instance_admin_client.BigtableInstanceAdminClient( - channel=channel - ) + api = mock.create_autospec(BigtableInstanceAdminClient) client._instance_admin_client = api - + cluster._instance._client.instance_admin_client.cluster_path.return_value = ( + "projects/project/instances/instance-id/clusters/cluster-id" + ) # Perform the method and check the result. 
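Two idioms carry the rewritten create/update tests here: raw protobuf machinery now hangs off the proto-plus wrapper's _pb attribute (SerializeToString, DESCRIPTOR), and the request actually sent is read back from the mock's call_args_list rather than from a ChannelStub. A condensed sketch of both (the mock and its values are illustrative):

    import mock

    from google.cloud.bigtable_admin_v2.types import (
        bigtable_instance_admin as messages_v2_pb2,
    )

    metadata = messages_v2_pb2.CreateClusterMetadata()
    serialized = metadata._pb.SerializeToString()  # raw protobuf via ._pb
    full_name = (
        messages_v2_pb2.CreateClusterMetadata._meta._pb.DESCRIPTOR.full_name
    )

    api = mock.Mock()
    api.create_cluster(request={"parent": "projects/p/instances/i", "cluster_id": "c"})
    # .kwargs on a call object needs Python 3.8+, which these tests assume.
    sent = api.create_cluster.call_args_list[0].kwargs
    assert sent == {"request": {"parent": "projects/p/instances/i", "cluster_id": "c"}}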
- result = cluster.update() - actual_request = channel.requests[0][1] + client._instance_admin_client.update_cluster.return_value = response_pb + cluster.update() + + actual_request = client._instance_admin_client.update_cluster.call_args_list[ + 0 + ].kwargs self.assertEqual(actual_request, expected_request) - self.assertIsInstance(result, operation.Operation) - self.assertEqual(result.operation.name, self.OP_NAME) - self.assertIsInstance(result.metadata, messages_v2_pb2.UpdateClusterMetadata) def test_delete(self): from google.protobuf import empty_pb2 - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) - api = bigtable_instance_admin_client.BigtableInstanceAdminClient(mock.Mock()) + api = mock.create_autospec(BigtableInstanceAdminClient) credentials = _make_credentials() client = self._make_client( project=self.PROJECT, credentials=credentials, admin=True @@ -465,7 +474,7 @@ def test_delete(self): # Patch the stub used by the API method. client._instance_admin_client = api instance_admin_client = client._instance_admin_client - instance_stub = instance_admin_client.transport + instance_stub = instance_admin_client instance_stub.delete_cluster.side_effect = [response_pb] # Create expected_result. diff --git a/tests/unit/test_column_family.py b/tests/unit/test_column_family.py index d6f6c2672..601c37cf5 100644 --- a/tests/unit/test_column_family.py +++ b/tests/unit/test_column_family.py @@ -344,11 +344,13 @@ def test_to_pb_with_rule(self): self.assertEqual(pb_val, expected) def _create_test_helper(self, gc_rule=None): - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_table_admin_pb2 as table_admin_v2_pb2, + from google.cloud.bigtable_admin_v2.types import ( + bigtable_table_admin as table_admin_v2_pb2, ) from tests.unit._testing import _FakeStub - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + BigtableTableAdminClient, + ) project_id = "project-id" zone = "zone" @@ -366,7 +368,8 @@ def _create_test_helper(self, gc_rule=None): + table_id ) - api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) + api = mock.create_autospec(BigtableTableAdminClient) + credentials = _make_credentials() client = self._make_client( project=project_id, credentials=credentials, admin=True @@ -380,7 +383,10 @@ def _create_test_helper(self, gc_rule=None): else: column_family_pb = _ColumnFamilyPB(gc_rule=gc_rule.to_pb()) request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest(name=table_name) - request_pb.modifications.add(id=column_family_id, create=column_family_pb) + modification = table_admin_v2_pb2.ModifyColumnFamiliesRequest.Modification() + modification.id = column_family_id + modification.create = column_family_pb + request_pb.modifications.append(modification) # Create response_pb response_pb = _ColumnFamilyPB() @@ -409,10 +415,12 @@ def test_create_with_gc_rule(self): def _update_test_helper(self, gc_rule=None): from tests.unit._testing import _FakeStub - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_table_admin_pb2 as table_admin_v2_pb2, + from google.cloud.bigtable_admin_v2.types import ( + bigtable_table_admin as table_admin_v2_pb2, + ) + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + BigtableTableAdminClient, ) - from google.cloud.bigtable_admin_v2.gapic import 
bigtable_table_admin_client project_id = "project-id" zone = "zone" @@ -430,7 +438,7 @@ def _update_test_helper(self, gc_rule=None): + table_id ) - api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) + api = mock.create_autospec(BigtableTableAdminClient) credentials = _make_credentials() client = self._make_client( project=project_id, credentials=credentials, admin=True @@ -444,7 +452,10 @@ def _update_test_helper(self, gc_rule=None): else: column_family_pb = _ColumnFamilyPB(gc_rule=gc_rule.to_pb()) request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest(name=table_name) - request_pb.modifications.add(id=column_family_id, update=column_family_pb) + modification = table_admin_v2_pb2.ModifyColumnFamiliesRequest.Modification() + modification.id = column_family_id + modification.update = column_family_pb + request_pb.modifications.append(modification) # Create response_pb response_pb = _ColumnFamilyPB() @@ -473,11 +484,13 @@ def test_update_with_gc_rule(self): def test_delete(self): from google.protobuf import empty_pb2 - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_table_admin_pb2 as table_admin_v2_pb2, + from google.cloud.bigtable_admin_v2.types import ( + bigtable_table_admin as table_admin_v2_pb2, ) from tests.unit._testing import _FakeStub - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + BigtableTableAdminClient, + ) project_id = "project-id" zone = "zone" @@ -495,7 +508,7 @@ def test_delete(self): + table_id ) - api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) + api = mock.create_autospec(BigtableTableAdminClient) credentials = _make_credentials() client = self._make_client( project=project_id, credentials=credentials, admin=True @@ -505,7 +518,10 @@ def test_delete(self): # Create request_pb request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest(name=table_name) - request_pb.modifications.add(id=column_family_id, drop=True) + modification = table_admin_v2_pb2.ModifyColumnFamiliesRequest.Modification( + id=column_family_id, drop=True + ) + request_pb.modifications.append(modification) # Create response_pb response_pb = empty_pb2.Empty() @@ -587,36 +603,40 @@ class MockProto(object): names = [] + _pb = {} + @classmethod def WhichOneof(cls, name): cls.names.append(name) return "unknown" + MockProto._pb = MockProto + self.assertEqual(MockProto.names, []) self.assertRaises(ValueError, self._call_fut, MockProto) self.assertEqual(MockProto.names, ["rule"]) def _GcRulePB(*args, **kw): - from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_v2_pb2 + from google.cloud.bigtable_admin_v2.types import table as table_v2_pb2 return table_v2_pb2.GcRule(*args, **kw) def _GcRuleIntersectionPB(*args, **kw): - from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_v2_pb2 + from google.cloud.bigtable_admin_v2.types import table as table_v2_pb2 return table_v2_pb2.GcRule.Intersection(*args, **kw) def _GcRuleUnionPB(*args, **kw): - from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_v2_pb2 + from google.cloud.bigtable_admin_v2.types import table as table_v2_pb2 return table_v2_pb2.GcRule.Union(*args, **kw) def _ColumnFamilyPB(*args, **kw): - from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_v2_pb2 + from google.cloud.bigtable_admin_v2.types import table as table_v2_pb2 return table_v2_pb2.ColumnFamily(*args, **kw) diff --git a/tests/unit/test_instance.py 
b/tests/unit/test_instance.py index 14dd0bf58..e493fd9c8 100644 --- a/tests/unit/test_instance.py +++ b/tests/unit/test_instance.py @@ -97,14 +97,15 @@ def test_constructor_non_default(self): self.assertEqual(instance.state, state) def test__update_from_pb_success(self): - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 from google.cloud.bigtable import enums - instance_type = enums.Instance.Type.PRODUCTION + instance_type = data_v2_pb2.Instance.Type.PRODUCTION state = enums.Instance.State.READY + # proto-plus exposes the reserved field name type as type_ instance_pb = data_v2_pb2.Instance( display_name=self.DISPLAY_NAME, - type=instance_type, + type_=instance_type, labels=self.LABELS, state=state, ) @@ -113,14 +114,14 @@ def test__update_from_pb_success(self): self.assertIsNone(instance.display_name) self.assertIsNone(instance.type_) self.assertIsNone(instance.labels) - instance._update_from_pb(instance_pb) + instance._update_from_pb(instance_pb._pb) self.assertEqual(instance.display_name, self.DISPLAY_NAME) self.assertEqual(instance.type_, instance_type) self.assertEqual(instance.labels, self.LABELS) self.assertEqual(instance._state, state) def test__update_from_pb_success_defaults(self): - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 from google.cloud.bigtable import enums instance_pb = data_v2_pb2.Instance(display_name=self.DISPLAY_NAME) @@ -129,13 +130,13 @@ def test__update_from_pb_success_defaults(self): self.assertIsNone(instance.display_name) self.assertIsNone(instance.type_) self.assertIsNone(instance.labels) - instance._update_from_pb(instance_pb) + instance._update_from_pb(instance_pb._pb) self.assertEqual(instance.display_name, self.DISPLAY_NAME) self.assertEqual(instance.type_, enums.Instance.Type.UNSPECIFIED) self.assertFalse(instance.labels) def test__update_from_pb_no_display_name(self): - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 instance_pb = data_v2_pb2.Instance() instance = self._make_one(None, None) @@ -144,7 +145,7 @@ def test__update_from_pb_no_display_name(self): instance._update_from_pb(instance_pb) def test_from_pb_success(self): - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 from google.cloud.bigtable import enums credentials = _make_credentials() @@ -156,7 +157,7 @@ def test_from_pb_success(self): instance_pb = data_v2_pb2.Instance( name=self.INSTANCE_NAME, display_name=self.INSTANCE_ID, - type=instance_type, + type_=instance_type, labels=self.LABELS, state=state, ) @@ -172,7 +173,7 @@ def test_from_pb_success(self): self.assertEqual(instance._state, state) def test_from_pb_bad_instance_name(self): - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 instance_name = "INCORRECT_FORMAT" instance_pb = data_v2_pb2.Instance(name=instance_name) @@ -182,7 +183,7 @@ def test_from_pb_bad_instance_name(self): klass.from_pb(instance_pb, None) def test_from_pb_project_mistmatch(self): - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 ALT_PROJECT = "ALT_PROJECT" credentials =
_make_credentials() @@ -199,14 +200,17 @@ def test_from_pb_project_mistmatch(self): klass.from_pb(instance_pb, client) def test_name_property(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) - api = bigtable_instance_admin_client.BigtableInstanceAdminClient(mock.Mock()) + api = mock.create_autospec(BigtableInstanceAdminClient) credentials = _make_credentials() client = self._make_client( project=self.PROJECT, credentials=credentials, admin=True ) + api.instance_path.return_value = "projects/project/instances/instance-id" # Patch the the API method. client._instance_admin_client = api @@ -261,41 +265,43 @@ def _instance_api_response_for_create(self): from google.longrunning import operations_pb2 from google.protobuf.any_pb2 import Any from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as messages_v2_pb2, + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, ) - from google.cloud.bigtable_admin_v2.types import instance_pb2 + from google.cloud.bigtable_admin_v2.types import ( + bigtable_instance_admin as messages_v2_pb2, + ) + from google.cloud.bigtable_admin_v2.types import instance NOW = datetime.datetime.utcnow() NOW_PB = _datetime_to_pb_timestamp(NOW) metadata = messages_v2_pb2.CreateInstanceMetadata(request_time=NOW_PB) type_url = "type.googleapis.com/{}".format( - messages_v2_pb2.CreateInstanceMetadata.DESCRIPTOR.full_name + messages_v2_pb2.CreateInstanceMetadata._meta._pb.DESCRIPTOR.full_name ) response_pb = operations_pb2.Operation( name=self.OP_NAME, - metadata=Any(type_url=type_url, value=metadata.SerializeToString()), + metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()), ) response = operation.from_gapic( response_pb, mock.Mock(), - instance_pb2.Instance, + instance.Instance, metadata_type=messages_v2_pb2.CreateInstanceMetadata, ) project_path_template = "projects/{}" location_path_template = "projects/{}/locations/{}" - instance_api = mock.create_autospec( - bigtable_instance_admin_client.BigtableInstanceAdminClient - ) + instance_api = mock.create_autospec(BigtableInstanceAdminClient) instance_api.create_instance.return_value = response instance_api.project_path = project_path_template.format instance_api.location_path = location_path_template.format + instance_api.common_location_path = location_path_template.format return instance_api, response def test_create(self): from google.cloud.bigtable import enums - from google.cloud.bigtable_admin_v2.types import instance_pb2 + from google.cloud.bigtable_admin_v2.types import Instance + from google.cloud.bigtable_admin_v2.types import Cluster import warnings credentials = _make_credentials() @@ -310,6 +316,7 @@ def test_create(self): self.LABELS, ) instance_api, response = self._instance_api_response_for_create() + instance_api.common_project_path.return_value = "projects/project" client._instance_admin_client = instance_api serve_nodes = 3 @@ -318,22 +325,24 @@ def test_create(self): location_id=self.LOCATION_ID, serve_nodes=serve_nodes ) - cluster_pb = instance_pb2.Cluster( + cluster_pb = Cluster( location=instance_api.location_path(self.PROJECT, self.LOCATION_ID), serve_nodes=serve_nodes, 
default_storage_type=enums.StorageType.UNSPECIFIED, ) - instance_pb = instance_pb2.Instance( + instance_pb = Instance( display_name=self.DISPLAY_NAME, - type=enums.Instance.Type.PRODUCTION, + type_=enums.Instance.Type.PRODUCTION, labels=self.LABELS, ) cluster_id = "{}-cluster".format(self.INSTANCE_ID) instance_api.create_instance.assert_called_once_with( - parent=instance_api.project_path(self.PROJECT), - instance_id=self.INSTANCE_ID, - instance=instance_pb, - clusters={cluster_id: cluster_pb}, + request={ + "parent": instance_api.project_path(self.PROJECT), + "instance_id": self.INSTANCE_ID, + "instance": instance_pb, + "clusters": {cluster_id: cluster_pb}, + } ) self.assertEqual(len(warned), 1) @@ -343,7 +352,9 @@ def test_create(self): def test_create_w_clusters(self): from google.cloud.bigtable import enums - from google.cloud.bigtable_admin_v2.types import instance_pb2 + from google.cloud.bigtable.cluster import Cluster + from google.cloud.bigtable_admin_v2.types import Cluster as cluster_pb + from google.cloud.bigtable_admin_v2.types import Instance as instance_pb credentials = _make_credentials() client = self._make_client( @@ -357,6 +368,7 @@ def test_create_w_clusters(self): self.LABELS, ) instance_api, response = self._instance_api_response_for_create() + instance_api.common_project_path.return_value = "projects/project" client._instance_admin_client = instance_api # Perform the method and check the result. @@ -383,36 +395,40 @@ def test_create_w_clusters(self): result = instance.create(clusters=clusters) - cluster_pb_1 = instance_pb2.Cluster( + cluster_pb_1 = cluster_pb( location=instance_api.location_path(self.PROJECT, location_id_1), serve_nodes=serve_nodes_1, default_storage_type=enums.StorageType.UNSPECIFIED, ) - cluster_pb_2 = instance_pb2.Cluster( + cluster_pb_2 = cluster_pb( location=instance_api.location_path(self.PROJECT, location_id_2), serve_nodes=serve_nodes_2, default_storage_type=enums.StorageType.UNSPECIFIED, ) - instance_pb = instance_pb2.Instance( + instance_pb = instance_pb( display_name=self.DISPLAY_NAME, - type=enums.Instance.Type.PRODUCTION, + type_=enums.Instance.Type.PRODUCTION, labels=self.LABELS, ) instance_api.create_instance.assert_called_once_with( - parent=instance_api.project_path(self.PROJECT), - instance_id=self.INSTANCE_ID, - instance=instance_pb, - clusters={cluster_id_1: cluster_pb_1, cluster_id_2: cluster_pb_2}, + request={ + "parent": instance_api.project_path(self.PROJECT), + "instance_id": self.INSTANCE_ID, + "instance": instance_pb, + "clusters": {cluster_id_1: cluster_pb_1, cluster_id_2: cluster_pb_2}, + } ) self.assertIs(result, response) def test_exists(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 from google.api_core import exceptions - api = bigtable_instance_admin_client.BigtableInstanceAdminClient(mock.Mock()) + api = mock.create_autospec(BigtableInstanceAdminClient) credentials = _make_credentials() client = self._make_client( project=self.PROJECT, credentials=credentials, admin=True @@ -426,9 +442,9 @@ def test_exists(self): # Patch the stub used by the API method. 
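The type= to type_= churn above is a proto-plus rule, not a schema change: field names that collide with Python builtins grow a trailing underscore, while the wire format keeps the original name. A one-message sketch:

    from google.cloud.bigtable_admin_v2.types import Instance

    instance_pb = Instance(
        display_name="my-instance",  # illustrative
        type_=Instance.Type.PRODUCTION,  # keyword is type_, never type
    )
    assert instance_pb.type_ == Instance.Type.PRODUCTION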
client._instance_admin_client = api - instance_admin_client = client._instance_admin_client - instance_stub = instance_admin_client.transport - instance_stub.get_instance.side_effect = [ + instance_admin_stub = client._instance_admin_client + + instance_admin_stub.get_instance.side_effect = [ response_pb, exceptions.NotFound("testing"), exceptions.BadRequest("testing"), @@ -445,11 +461,13 @@ def test_exists(self): alt_instance_2.exists() def test_reload(self): - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) from google.cloud.bigtable import enums - api = bigtable_instance_admin_client.BigtableInstanceAdminClient(mock.Mock()) + api = mock.create_autospec(BigtableInstanceAdminClient) credentials = _make_credentials() client = self._make_client( project=self.PROJECT, credentials=credentials, admin=True @@ -460,12 +478,12 @@ def test_reload(self): DISPLAY_NAME = u"hey-hi-hello" instance_type = enums.Instance.Type.PRODUCTION response_pb = data_v2_pb2.Instance( - display_name=DISPLAY_NAME, type=instance_type, labels=self.LABELS + display_name=DISPLAY_NAME, type_=instance_type, labels=self.LABELS ) # Patch the stub used by the API method. client._instance_admin_client = api - bigtable_instance_stub = client._instance_admin_client.transport + bigtable_instance_stub = client._instance_admin_client bigtable_instance_stub.get_instance.side_effect = [response_pb] # Create expected_result. @@ -487,32 +505,32 @@ def _instance_api_response_for_update(self): from google.longrunning import operations_pb2 from google.protobuf.any_pb2 import Any from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as messages_v2_pb2, + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, ) - from google.cloud.bigtable_admin_v2.types import instance_pb2 + from google.cloud.bigtable_admin_v2.types import ( + bigtable_instance_admin as messages_v2_pb2, + ) + from google.cloud.bigtable_admin_v2.types import instance NOW = datetime.datetime.utcnow() NOW_PB = _datetime_to_pb_timestamp(NOW) metadata = messages_v2_pb2.UpdateInstanceMetadata(request_time=NOW_PB) type_url = "type.googleapis.com/{}".format( - messages_v2_pb2.UpdateInstanceMetadata.DESCRIPTOR.full_name + messages_v2_pb2.UpdateInstanceMetadata._meta._pb.DESCRIPTOR.full_name ) response_pb = operations_pb2.Operation( name=self.OP_NAME, - metadata=Any(type_url=type_url, value=metadata.SerializeToString()), + metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()), ) response = operation.from_gapic( response_pb, mock.Mock(), - instance_pb2.Instance, + instance.Instance, metadata_type=messages_v2_pb2.UpdateInstanceMetadata, ) instance_path_template = "projects/{project}/instances/{instance}" - instance_api = mock.create_autospec( - bigtable_instance_admin_client.BigtableInstanceAdminClient - ) + instance_api = mock.create_autospec(BigtableInstanceAdminClient) instance_api.partial_update_instance.return_value = response instance_api.instance_path = instance_path_template.format return instance_api, response @@ -520,7 +538,7 @@ def 
_instance_api_response_for_update(self): def test_update(self): from google.cloud.bigtable import enums from google.protobuf import field_mask_pb2 - from google.cloud.bigtable_admin_v2.types import instance_pb2 + from google.cloud.bigtable_admin_v2.types import Instance credentials = _make_credentials() client = self._make_client( @@ -538,10 +556,10 @@ def test_update(self): result = instance.update() - instance_pb = instance_pb2.Instance( + instance_pb = Instance( name=instance.name, display_name=instance.display_name, - type=instance.type_, + type_=instance.type_, labels=instance.labels, ) update_mask_pb = field_mask_pb2.FieldMask( @@ -549,14 +567,14 @@ def test_update(self): ) instance_api.partial_update_instance.assert_called_once_with( - instance=instance_pb, update_mask=update_mask_pb + request={"instance": instance_pb, "update_mask": update_mask_pb} ) self.assertIs(result, response) def test_update_empty(self): from google.protobuf import field_mask_pb2 - from google.cloud.bigtable_admin_v2.types import instance_pb2 + from google.cloud.bigtable_admin_v2.types import Instance credentials = _make_credentials() client = self._make_client( @@ -568,42 +586,46 @@ def test_update_empty(self): result = instance.update() - instance_pb = instance_pb2.Instance( + instance_pb = Instance( name=instance.name, display_name=instance.display_name, - type=instance.type_, + type_=instance.type_, labels=instance.labels, ) update_mask_pb = field_mask_pb2.FieldMask() instance_api.partial_update_instance.assert_called_once_with( - instance=instance_pb, update_mask=update_mask_pb + request={"instance": instance_pb, "update_mask": update_mask_pb} ) self.assertIs(result, response) def test_delete(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) credentials = _make_credentials() client = self._make_client( project=self.PROJECT, credentials=credentials, admin=True ) instance = self._make_one(self.INSTANCE_ID, client) - instance_api = mock.create_autospec( - bigtable_instance_admin_client.BigtableInstanceAdminClient - ) + instance_api = mock.create_autospec(BigtableInstanceAdminClient) instance_api.delete_instance.return_value = None client._instance_admin_client = instance_api result = instance.delete() - instance_api.delete_instance.assert_called_once_with(instance.name) + instance_api.delete_instance.assert_called_once_with( + request={"name": instance.name} + ) self.assertIsNone(result) def test_get_iam_policy(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) from google.iam.v1 import policy_pb2 from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE @@ -620,16 +642,16 @@ def test_get_iam_policy(self): iam_policy = policy_pb2.Policy(version=version, etag=etag, bindings=bindings) # Patch the stub used by the API method. - instance_api = mock.create_autospec( - bigtable_instance_admin_client.BigtableInstanceAdminClient - ) + instance_api = mock.create_autospec(BigtableInstanceAdminClient) client._instance_admin_client = instance_api instance_api.get_iam_policy.return_value = iam_policy # Perform the method and check the result. 
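partial_update_instance follows the same single-request convention, and the well-known protobuf types are untouched: FieldMask still comes from google.protobuf, only the API messages moved to proto-plus. A hedged sketch against a bare mock (names illustrative):

    import mock

    from google.protobuf import field_mask_pb2
    from google.cloud.bigtable_admin_v2.types import Instance

    api = mock.Mock()
    instance_pb = Instance(name="projects/p/instances/i", display_name="new-name")
    update_mask_pb = field_mask_pb2.FieldMask(paths=["display_name"])

    api.partial_update_instance(
        request={"instance": instance_pb, "update_mask": update_mask_pb}
    )
    api.partial_update_instance.assert_called_once_with(
        request={"instance": instance_pb, "update_mask": update_mask_pb}
    )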
result = instance.get_iam_policy() - instance_api.get_iam_policy.assert_called_once_with(resource=instance.name) + instance_api.get_iam_policy.assert_called_once_with( + request={"resource": instance.name} + ) self.assertEqual(result.version, version) self.assertEqual(result.etag, etag) admins = result.bigtable_admins @@ -638,7 +660,9 @@ def test_get_iam_policy(self): self.assertEqual(found, expected) def test_get_iam_policy_w_requested_policy_version(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) from google.iam.v1 import policy_pb2, options_pb2 from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE @@ -655,9 +679,7 @@ def test_get_iam_policy_w_requested_policy_version(self): iam_policy = policy_pb2.Policy(version=version, etag=etag, bindings=bindings) # Patch the stub used by the API method. - instance_api = mock.create_autospec( - bigtable_instance_admin_client.BigtableInstanceAdminClient - ) + instance_api = mock.create_autospec(BigtableInstanceAdminClient) client._instance_admin_client = instance_api instance_api.get_iam_policy.return_value = iam_policy @@ -665,8 +687,10 @@ def test_get_iam_policy_w_requested_policy_version(self): result = instance.get_iam_policy(requested_policy_version=3) instance_api.get_iam_policy.assert_called_once_with( - resource=instance.name, - options_=options_pb2.GetPolicyOptions(requested_policy_version=3), + request={ + "resource": instance.name, + "options_": options_pb2.GetPolicyOptions(requested_policy_version=3), + } ) self.assertEqual(result.version, version) self.assertEqual(result.etag, etag) @@ -676,7 +700,9 @@ def test_get_iam_policy_w_requested_policy_version(self): self.assertEqual(found, expected) def test_set_iam_policy(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) from google.iam.v1 import policy_pb2 from google.cloud.bigtable.policy import Policy from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE @@ -694,9 +720,7 @@ def test_set_iam_policy(self): iam_policy_pb = policy_pb2.Policy(version=version, etag=etag, bindings=bindings) # Patch the stub used by the API method. 
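The IAM surface migrates the same way: resource, policy, and permissions all move inside the request dict. A short sketch of the three calls as the surrounding tests assert them (resource name and permission are illustrative):

    import mock

    from google.iam.v1 import policy_pb2

    api = mock.Mock()
    resource = "projects/p/instances/i"

    api.get_iam_policy(request={"resource": resource})
    api.set_iam_policy(
        request={"resource": resource, "policy": policy_pb2.Policy(version=3)}
    )
    api.test_iam_permissions(
        request={"resource": resource, "permissions": ["bigtable.tables.get"]}
    )
    api.test_iam_permissions.assert_called_once_with(
        request={"resource": resource, "permissions": ["bigtable.tables.get"]}
    )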
- instance_api = mock.create_autospec( - bigtable_instance_admin_client.BigtableInstanceAdminClient - ) + instance_api = mock.create_autospec(BigtableInstanceAdminClient) instance_api.set_iam_policy.return_value = iam_policy_pb client._instance_admin_client = instance_api @@ -710,7 +734,7 @@ def test_set_iam_policy(self): result = instance.set_iam_policy(iam_policy) instance_api.set_iam_policy.assert_called_once_with( - resource=instance.name, policy=iam_policy_pb + request={"resource": instance.name, "policy": iam_policy_pb} ) self.assertEqual(result.version, version) self.assertEqual(result.etag, etag) @@ -720,7 +744,9 @@ def test_set_iam_policy(self): self.assertEqual(found, expected) def test_test_iam_permissions(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) from google.iam.v1 import iam_policy_pb2 credentials = _make_credentials() @@ -733,9 +759,7 @@ def test_test_iam_permissions(self): response = iam_policy_pb2.TestIamPermissionsResponse(permissions=permissions) - instance_api = mock.create_autospec( - bigtable_instance_admin_client.BigtableInstanceAdminClient - ) + instance_api = mock.create_autospec(BigtableInstanceAdminClient) instance_api.test_iam_permissions.return_value = response client._instance_admin_client = instance_api @@ -743,7 +767,7 @@ def test_test_iam_permissions(self): self.assertEqual(result, permissions) instance_api.test_iam_permissions.assert_called_once_with( - resource=instance.name, permissions=permissions + request={"resource": instance.name, "permissions": permissions} ) def test_cluster_factory(self): @@ -770,11 +794,13 @@ def test_cluster_factory(self): self.assertEqual(cluster.default_storage_type, STORAGE_TYPE) def test_list_clusters(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as messages_v2_pb2, + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) + from google.cloud.bigtable_admin_v2.types import ( + bigtable_instance_admin as messages_v2_pb2, ) - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 from google.cloud.bigtable.instance import Instance from google.cloud.bigtable.instance import Cluster @@ -805,9 +831,7 @@ def test_list_clusters(self): ) # Patch the stub used by the API method. 
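The other pattern repeated throughout the file: instead of instantiating a real client around a `mock.Mock()` channel, the stub is built with `mock.create_autospec`, so every call is checked against the generated signature. A sketch of the pattern in isolation (resource name invented for illustration):

    import mock
    from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
        BigtableInstanceAdminClient,
    )

    instance_api = mock.create_autospec(BigtableInstanceAdminClient)
    instance_api.delete_instance.return_value = None

    # Autospec raises TypeError for calls that don't match the real method.
    instance_api.delete_instance(request={"name": "projects/p/instances/i"})
    instance_api.delete_instance.assert_called_once_with(
        request={"name": "projects/p/instances/i"}
    )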
- instance_api = mock.create_autospec( - bigtable_instance_admin_client.BigtableInstanceAdminClient - ) + instance_api = mock.create_autospec(BigtableInstanceAdminClient) instance_api.list_clusters.side_effect = [response_pb] instance_api.cluster_path = cluster_path_template.format client._instance_admin_client = instance_api @@ -838,25 +862,26 @@ def test_table_factory(self): self.assertEqual(table._app_profile_id, app_profile_id) def _list_tables_helper(self, table_name=None): - from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_data_v2_pb2 - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_table_admin_pb2 as table_messages_v1_pb2, + from google.cloud.bigtable_admin_v2.types import table as table_data_v2_pb2 + from google.cloud.bigtable_admin_v2.types import ( + bigtable_table_admin as table_messages_v1_pb2, ) - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_table_admin_client, - bigtable_instance_admin_client, + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + BigtableTableAdminClient, ) - - table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) - instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient( - mock.Mock() + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, ) + + table_api = mock.create_autospec(BigtableTableAdminClient) + instance_api = mock.create_autospec(BigtableInstanceAdminClient) credentials = _make_credentials() client = self._make_client( project=self.PROJECT, credentials=credentials, admin=True ) instance = self._make_one(self.INSTANCE_ID, client) + instance_api.instance_path.return_value = instance.name # Create response_pb if table_name is None: table_name = self.TABLE_NAME @@ -868,7 +893,7 @@ def _list_tables_helper(self, table_name=None): # Patch the stub used by the API method. client._table_admin_client = table_api client._instance_admin_client = instance_api - bigtable_table_stub = client._table_admin_client.transport + bigtable_table_stub = client._table_admin_client bigtable_table_stub.list_tables.side_effect = [response_pb] # Create expected_result. @@ -939,8 +964,10 @@ def test_app_profile_factory(self): def test_list_app_profiles(self): from google.api_core.page_iterator import Iterator from google.api_core.page_iterator import Page - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 from google.cloud.bigtable.app_profile import AppProfile class _Iterator(Iterator): @@ -982,9 +1009,7 @@ def _next_page(self): iterator = _Iterator(pages=[app_profiles]) # Patch the stub used by the API method. 
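The import rewrites are mechanical but worth spelling out: the `gapic` and `proto` packages are gone, clients live under `services.<service_name>`, and generated messages live under `types`, one module per proto file. A sketch using the same aliases these tests keep:

    from google.cloud.bigtable_admin_v2.types import (
        bigtable_instance_admin as messages_v2_pb2,
    )
    from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2

    # Request/response messages and resource messages come from separate
    # modules, mirroring the two proto files they are generated from.
    response_pb = messages_v2_pb2.ListClustersResponse(
        clusters=[data_v2_pb2.Cluster(name="projects/p/instances/i/clusters/c")]
    )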
- instance_api = mock.create_autospec( - bigtable_instance_admin_client.BigtableInstanceAdminClient - ) + instance_api = mock.create_autospec(BigtableInstanceAdminClient) client._instance_admin_client = instance_api instance_api.app_profile_path = app_profile_path_template.format instance_api.list_app_profiles.return_value = iterator diff --git a/tests/unit/test_policy.py b/tests/unit/test_policy.py index 939e02a9d..63f9ba03f 100644 --- a/tests/unit/test_policy.py +++ b/tests/unit/test_policy.py @@ -147,11 +147,7 @@ def test_from_pb_with_condition(self): }, } ] - message = policy_pb2.Policy( - etag=ETAG, - version=VERSION, - bindings=BINDINGS, - ) + message = policy_pb2.Policy(etag=ETAG, version=VERSION, bindings=BINDINGS,) klass = self._get_target_class() policy = klass.from_pb(message) self.assertEqual(policy.etag, ETAG) diff --git a/tests/unit/test_row.py b/tests/unit/test_row.py index 16a8232ec..6b5f4168b 100644 --- a/tests/unit/test_row.py +++ b/tests/unit/test_row.py @@ -120,7 +120,7 @@ def test_get_mutations_size(self): total_mutations_size = 0 for mutation in row._get_mutations(): - total_mutations_size += mutation.ByteSize() + total_mutations_size += mutation._pb.ByteSize() self.assertEqual(row.get_mutations_size(), total_mutations_size) @@ -282,7 +282,9 @@ def _delete_cells_helper(self, time_range=None): ) ) if time_range is not None: - expected_pb.delete_from_column.time_range.CopyFrom(time_range.to_pb()) + expected_pb.delete_from_column.time_range._pb.CopyFrom( + time_range.to_pb()._pb + ) self.assertEqual(row._pb_mutations, [expected_pb]) def test_delete_cells_no_time_range(self): @@ -427,7 +429,7 @@ def test__get_mutations(self): def test_commit(self): from google.cloud.bigtable.row_filters import RowSampleFilter - from google.cloud.bigtable_v2.gapic import bigtable_client + from google.cloud.bigtable_v2.services.bigtable import BigtableClient project_id = "project-id" row_key = b"row_key" @@ -439,7 +441,7 @@ def test_commit(self): column1 = b"column1" column2 = b"column2" - api = bigtable_client.BigtableClient(mock.Mock()) + api = mock.create_autospec(BigtableClient) credentials = _make_credentials() client = self._make_client( project=project_id, credentials=credentials, admin=True @@ -456,7 +458,7 @@ def test_commit(self): response_pb = _CheckAndMutateRowResponsePB(predicate_matched=predicate_matched) # Patch the stub used by the API method. - api.transport.check_and_mutate_row.side_effect = [response_pb] + api.check_and_mutate_row.side_effect = [response_pb] client._table_data_client = api # Create expected_result. 
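The `._pb` edits in `test_row.py` above are a proto-plus consequence: generated messages are now wrapper objects, so raw-protobuf APIs such as `ByteSize` and `CopyFrom` must be reached through the underlying message. A sketch of the distinction:

    from google.cloud.bigtable_v2.types import data

    mutation = data.Mutation(
        set_cell=data.Mutation.SetCell(
            family_name="cf1", column_qualifier=b"c1", value=b"1"
        )
    )

    # Wrapper attribute access is unchanged...
    assert mutation.set_cell.family_name == "cf1"

    # ...but protobuf-level methods live on the raw message behind `._pb`.
    size = mutation._pb.ByteSize()
    copy = data.Mutation()
    copy._pb.CopyFrom(mutation._pb)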
@@ -468,8 +470,8 @@ def test_commit(self): row.delete_cell(column_family_id2, column2, state=True) row.delete_cells(column_family_id3, row.ALL_COLUMNS, state=True) result = row.commit() - call_args = api.transport.check_and_mutate_row.call_args.args[0] - self.assertEqual(app_profile_id, call_args.app_profile_id) + call_args = api.check_and_mutate_row.call_args + self.assertEqual(app_profile_id, call_args.app_profile_id[0]) self.assertEqual(result, expected_result) self.assertEqual(row._true_pb_mutations, []) self.assertEqual(row._false_pb_mutations, []) @@ -585,7 +587,7 @@ def test_increment_cell_value(self): def test_commit(self): from google.cloud._testing import _Monkey from google.cloud.bigtable import row as MUT - from google.cloud.bigtable_v2.gapic import bigtable_client + from google.cloud.bigtable_v2.services.bigtable import BigtableClient project_id = "project-id" row_key = b"row_key" @@ -594,7 +596,8 @@ def test_commit(self): column_family_id = u"column_family_id" column = b"column" - api = bigtable_client.BigtableClient(mock.Mock()) + api = mock.create_autospec(BigtableClient) + credentials = _make_credentials() client = self._make_client( project=project_id, credentials=credentials, admin=True @@ -618,10 +621,11 @@ def mock_parse_rmw_row_response(row_response): # Perform the method and check the result. with _Monkey(MUT, _parse_rmw_row_response=mock_parse_rmw_row_response): + row._table._instance._client._table_data_client = api row.append_cell_value(column_family_id, column, value) result = row.commit() - call_args = api.transport.read_modify_write_row.call_args.args[0] - self.assertEqual(app_profile_id, call_args.app_profile_id) + call_args = api.read_modify_write_row.call_args_list[0] + self.assertEqual(app_profile_id, call_args.app_profile_id[0]) self.assertEqual(result, expected_result) self.assertEqual(row._rule_pb_list, []) @@ -770,73 +774,73 @@ def test_it(self): def _CheckAndMutateRowResponsePB(*args, **kw): - from google.cloud.bigtable_v2.proto import bigtable_pb2 as messages_v2_pb2 + from google.cloud.bigtable_v2.types import bigtable as messages_v2_pb2 return messages_v2_pb2.CheckAndMutateRowResponse(*args, **kw) def _ReadModifyWriteRowResponsePB(*args, **kw): - from google.cloud.bigtable_v2.proto import bigtable_pb2 as messages_v2_pb2 + from google.cloud.bigtable_v2.types import bigtable as messages_v2_pb2 return messages_v2_pb2.ReadModifyWriteRowResponse(*args, **kw) def _CellPB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 + from google.cloud.bigtable_v2.types import data as data_v2_pb2 return data_v2_pb2.Cell(*args, **kw) def _ColumnPB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 + from google.cloud.bigtable_v2.types import data as data_v2_pb2 return data_v2_pb2.Column(*args, **kw) def _FamilyPB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 + from google.cloud.bigtable_v2.types import data as data_v2_pb2 return data_v2_pb2.Family(*args, **kw) def _MutationPB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 + from google.cloud.bigtable_v2.types import data as data_v2_pb2 return data_v2_pb2.Mutation(*args, **kw) def _MutationSetCellPB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 + from google.cloud.bigtable_v2.types import data as data_v2_pb2 return data_v2_pb2.Mutation.SetCell(*args, **kw) def _MutationDeleteFromColumnPB(*args, **kw): - from google.cloud.bigtable_v2.proto import 
data_pb2 as data_v2_pb2 + from google.cloud.bigtable_v2.types import data as data_v2_pb2 return data_v2_pb2.Mutation.DeleteFromColumn(*args, **kw) def _MutationDeleteFromFamilyPB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 + from google.cloud.bigtable_v2.types import data as data_v2_pb2 return data_v2_pb2.Mutation.DeleteFromFamily(*args, **kw) def _MutationDeleteFromRowPB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 + from google.cloud.bigtable_v2.types import data as data_v2_pb2 return data_v2_pb2.Mutation.DeleteFromRow(*args, **kw) def _RowPB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 + from google.cloud.bigtable_v2.types import data as data_v2_pb2 return data_v2_pb2.Row(*args, **kw) def _ReadModifyWriteRulePB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 + from google.cloud.bigtable_v2.types import data as data_v2_pb2 return data_v2_pb2.ReadModifyWriteRule(*args, **kw) diff --git a/tests/unit/test_row_data.py b/tests/unit/test_row_data.py index c59da844b..21c0a582b 100644 --- a/tests/unit/test_row_data.py +++ b/tests/unit/test_row_data.py @@ -19,7 +19,7 @@ from google.api_core.exceptions import DeadlineExceeded from ._testing import _make_credentials from google.cloud.bigtable.row_set import RowRange -from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 +from google.cloud.bigtable_v2.types import data as data_v2_pb2 class MultiCallableStub(object): @@ -64,7 +64,7 @@ def _make_one(self, *args, **kwargs): def _from_pb_test_helper(self, labels=None): import datetime from google.cloud._helpers import _EPOCH - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 + from google.cloud.bigtable_v2.types import data as data_v2_pb2 timestamp_micros = TestCell.timestamp_micros timestamp = _EPOCH + datetime.timedelta(microseconds=timestamp_micros) @@ -453,7 +453,7 @@ def test_state_start(self): self.assertEqual(yrd.state, yrd.NEW_ROW) def test_state_new_row_w_row(self): - from google.cloud.bigtable_v2.gapic import bigtable_client + from google.cloud.bigtable_v2.services.bigtable import BigtableClient chunk = _ReadRowsResponseCellChunkPB( row_key=self.ROW_KEY, @@ -467,8 +467,9 @@ def test_state_new_row_w_row(self): response = _ReadRowsResponseV2(chunks) iterator = _MockCancellableIterator(response) - channel = ChannelStub(responses=[iterator]) - data_api = bigtable_client.BigtableClient(channel=channel) + + data_api = mock.create_autospec(BigtableClient) + credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True @@ -476,10 +477,10 @@ def test_state_new_row_w_row(self): client._table_data_client = data_api request = object() - yrd = self._make_one(client._table_data_client.transport.read_rows, request) + yrd = self._make_one(client._table_data_client.read_rows, request) self.assertEqual(yrd.retry._deadline, 60.0) - yrd._response_iterator = iterator + yrd.response_iterator = iterator rows = [row for row in yrd] result = rows[0] @@ -488,7 +489,7 @@ def test_state_new_row_w_row(self): self.assertEqual(yrd.state, yrd.NEW_ROW) def test_multiple_chunks(self): - from google.cloud.bigtable_v2.gapic import bigtable_client + from google.cloud.bigtable_v2.services.bigtable import BigtableClient chunk1 = _ReadRowsResponseCellChunkPB( row_key=self.ROW_KEY, @@ -508,8 +509,7 @@ def test_multiple_chunks(self): response = _ReadRowsResponseV2(chunks) iterator = 
_MockCancellableIterator(response) - channel = ChannelStub(responses=[iterator]) - data_api = bigtable_client.BigtableClient(channel=channel) + data_api = mock.create_autospec(BigtableClient) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True @@ -517,9 +517,9 @@ def test_multiple_chunks(self): client._table_data_client = data_api request = object() - yrd = self._make_one(client._table_data_client.transport.read_rows, request) + yrd = self._make_one(client._table_data_client.read_rows, request) - yrd._response_iterator = iterator + yrd.response_iterator = iterator rows = [row for row in yrd] result = rows[0] self.assertEqual(result.row_key, self.ROW_KEY) @@ -544,7 +544,7 @@ def test__copy_from_previous_unset(self): client = _Client() client._data_stub = mock.MagicMock() request = object() - yrd = self._make_one(client._data_stub.ReadRows, request) + yrd = self._make_one(client._data_stub.read_rows, request) cell = _PartialCellData() yrd._copy_from_previous(cell) self.assertEqual(cell.row_key, b"") @@ -579,15 +579,18 @@ def test__copy_from_previous_blank(self): self.assertEqual(cell.labels, LABELS) def test__copy_from_previous_filled(self): + from google.cloud.bigtable_v2.services.bigtable import BigtableClient + ROW_KEY = "RK" FAMILY_NAME = u"A" QUALIFIER = b"C" TIMESTAMP_MICROS = 100 LABELS = ["L1", "L2"] client = _Client() - client._data_stub = mock.MagicMock() + data_api = mock.create_autospec(BigtableClient) + client._data_stub = data_api request = object() - yrd = self._make_one(client._data_stub.ReadRows, request) + yrd = self._make_one(client._data_stub.read_rows, request) yrd._previous_cell = _PartialCellData( row_key=ROW_KEY, family_name=FAMILY_NAME, @@ -608,33 +611,37 @@ def test_valid_last_scanned_row_key_on_start(self): response = _ReadRowsResponseV2(chunks=(), last_scanned_row_key="2.AFTER") iterator = _MockCancellableIterator(response) client._data_stub = mock.MagicMock() - client._data_stub.ReadRows.side_effect = [iterator] + client._data_stub.read_rows.side_effect = [iterator] request = object() - yrd = self._make_one(client._data_stub.ReadRows, request) + yrd = self._make_one(client._data_stub.read_rows, request) yrd.last_scanned_row_key = "1.BEFORE" self._consume_all(yrd) self.assertEqual(yrd.last_scanned_row_key, "2.AFTER") def test_invalid_empty_chunk(self): from google.cloud.bigtable.row_data import InvalidChunk + from google.cloud.bigtable_v2.services.bigtable import BigtableClient client = _Client() chunks = _generate_cell_chunks([""]) response = _ReadRowsResponseV2(chunks) iterator = _MockCancellableIterator(response) - client._data_stub = mock.MagicMock() - client._data_stub.ReadRows.side_effect = [iterator] + client._data_stub = mock.create_autospec(BigtableClient) + client._data_stub.read_rows.side_effect = [iterator] request = object() - yrd = self._make_one(client._data_stub.ReadRows, request) + yrd = self._make_one(client._data_stub.read_rows, request) with self.assertRaises(InvalidChunk): self._consume_all(yrd) def test_state_cell_in_progress(self): + from google.cloud.bigtable_v2.services.bigtable import BigtableClient + LABELS = ["L1", "L2"] request = object() - read_rows = mock.MagicMock() - yrd = self._make_one(read_rows, request) + client = _Client() + client._data_stub = mock.create_autospec(BigtableClient) + yrd = self._make_one(client._data_stub.read_rows, request) chunk = _ReadRowsResponseCellChunkPB( row_key=self.ROW_KEY, @@ -657,6 +664,8 @@ def test_state_cell_in_progress(self): 
self.assertEqual(yrd._cell.value, self.VALUE + self.VALUE) def test_yield_rows_data(self): + from google.cloud.bigtable_v2.services.bigtable import BigtableClient + client = _Client() chunk = _ReadRowsResponseCellChunkPB( @@ -671,12 +680,13 @@ def test_yield_rows_data(self): response = _ReadRowsResponseV2(chunks) iterator = _MockCancellableIterator(response) - client._data_stub = mock.MagicMock() - client._data_stub.ReadRows.side_effect = [iterator] + data_api = mock.create_autospec(BigtableClient) + client._data_stub = data_api + client._data_stub.read_rows.side_effect = [iterator] request = object() - yrd = self._make_one(client._data_stub.ReadRows, request) + yrd = self._make_one(client._data_stub.read_rows, request) result = self._consume_all(yrd)[0] @@ -726,9 +736,9 @@ def setUpClass(cls): cls.row_range3 = RowRange(b"row_key41", b"row_key49") cls.request = _ReadRowsRequestPB(table_name=cls.table_name) - cls.request.rows.row_ranges.add(**cls.row_range1.get_range_kwargs()) - cls.request.rows.row_ranges.add(**cls.row_range2.get_range_kwargs()) - cls.request.rows.row_ranges.add(**cls.row_range3.get_range_kwargs()) + cls.request.rows.row_ranges.append(cls.row_range1.get_range_kwargs()) + cls.request.rows.row_ranges.append(cls.row_range2.get_range_kwargs()) + cls.request.rows.row_ranges.append(cls.row_range3.get_range_kwargs()) @staticmethod def _get_target_class(): @@ -796,9 +806,9 @@ def test__filter_row_ranges_all_ranges_already_read_open_closed(self): row_range3 = RowRange(b"row_key41", b"row_key49", False, True) request = _ReadRowsRequestPB(table_name=self.table_name) - request.rows.row_ranges.add(**row_range1.get_range_kwargs()) - request.rows.row_ranges.add(**row_range2.get_range_kwargs()) - request.rows.row_ranges.add(**row_range3.get_range_kwargs()) + request.rows.row_ranges.append(row_range1.get_range_kwargs()) + request.rows.row_ranges.append(row_range2.get_range_kwargs()) + request.rows.row_ranges.append(row_range3.get_range_kwargs()) request_manager = self._make_one(request, last_scanned_key, 2) request_manager.new_message = _ReadRowsRequestPB(table_name=self.table_name) @@ -827,13 +837,14 @@ def test__filter_row_ranges_some_ranges_already_read(self): def test_build_updated_request(self): from google.cloud.bigtable.row_filters import RowSampleFilter + from google.cloud.bigtable_v2.types import RowRange row_filter = RowSampleFilter(0.33) last_scanned_key = b"row_key25" request = _ReadRowsRequestPB( filter=row_filter.to_pb(), rows_limit=8, table_name=self.table_name ) - request.rows.row_ranges.add(**self.row_range1.get_range_kwargs()) + request.rows.row_ranges.append(self.row_range1.get_range_kwargs()) request_manager = self._make_one(request, last_scanned_key, 2) @@ -842,13 +853,17 @@ def test_build_updated_request(self): expected_result = _ReadRowsRequestPB( table_name=self.table_name, filter=row_filter.to_pb(), rows_limit=6 ) - expected_result.rows.row_ranges.add( + + row_range1 = RowRange( start_key_open=last_scanned_key, end_key_open=self.row_range1.end_key ) + expected_result.rows.row_ranges.append(row_range1) self.assertEqual(expected_result, result) def test_build_updated_request_full_table(self): + from google.cloud.bigtable_v2.types import RowRange + last_scanned_key = b"row_key14" request = _ReadRowsRequestPB(table_name=self.table_name) @@ -856,18 +871,21 @@ def test_build_updated_request_full_table(self): result = request_manager.build_updated_request() expected_result = _ReadRowsRequestPB(table_name=self.table_name, filter={}) - 
expected_result.rows.row_ranges.add(start_key_open=last_scanned_key) + row_range1 = RowRange(start_key_open=last_scanned_key) + expected_result.rows.row_ranges.append(row_range1) self.assertEqual(expected_result, result) def test_build_updated_request_no_start_key(self): from google.cloud.bigtable.row_filters import RowSampleFilter + from google.cloud.bigtable_v2.types import RowRange row_filter = RowSampleFilter(0.33) last_scanned_key = b"row_key25" request = _ReadRowsRequestPB( filter=row_filter.to_pb(), rows_limit=8, table_name=self.table_name ) - request.rows.row_ranges.add(end_key_open=b"row_key29") + row_range1 = RowRange(end_key_open=b"row_key29") + request.rows.row_ranges.append(row_range1) request_manager = self._make_one(request, last_scanned_key, 2) @@ -876,21 +894,26 @@ def test_build_updated_request_no_start_key(self): expected_result = _ReadRowsRequestPB( table_name=self.table_name, filter=row_filter.to_pb(), rows_limit=6 ) - expected_result.rows.row_ranges.add( + + row_range2 = RowRange( start_key_open=last_scanned_key, end_key_open=b"row_key29" ) + expected_result.rows.row_ranges.append(row_range2) self.assertEqual(expected_result, result) def test_build_updated_request_no_end_key(self): from google.cloud.bigtable.row_filters import RowSampleFilter + from google.cloud.bigtable_v2.types import RowRange row_filter = RowSampleFilter(0.33) last_scanned_key = b"row_key25" request = _ReadRowsRequestPB( filter=row_filter.to_pb(), rows_limit=8, table_name=self.table_name ) - request.rows.row_ranges.add(start_key_closed=b"row_key20") + + row_range1 = RowRange(start_key_closed=b"row_key20") + request.rows.row_ranges.append(row_range1) request_manager = self._make_one(request, last_scanned_key, 2) @@ -899,7 +922,8 @@ def test_build_updated_request_no_end_key(self): expected_result = _ReadRowsRequestPB( table_name=self.table_name, filter=row_filter.to_pb(), rows_limit=6 ) - expected_result.rows.row_ranges.add(start_key_open=last_scanned_key) + row_range2 = RowRange(start_key_open=last_scanned_key) + expected_result.rows.row_ranges.append(row_range2) self.assertEqual(expected_result, result) @@ -934,6 +958,8 @@ def test_build_updated_request_rows(self): self.assertEqual(expected_result, result) def test_build_updated_request_rows_limit(self): + from google.cloud.bigtable_v2.types import RowRange + last_scanned_key = b"row_key14" request = _ReadRowsRequestPB(table_name=self.table_name, rows_limit=10) @@ -943,7 +969,8 @@ def test_build_updated_request_rows_limit(self): expected_result = _ReadRowsRequestPB( table_name=self.table_name, filter={}, rows_limit=8 ) - expected_result.rows.row_ranges.add(start_key_open=last_scanned_key) + row_range1 = RowRange(start_key_open=last_scanned_key) + expected_result.rows.row_ranges.append(row_range1) self.assertEqual(expected_result, result) def test__key_already_read(self): @@ -1074,14 +1101,17 @@ def test_invalid_last_row_missing_commit(self): _marker = object() def _match_results(self, testcase_name, expected_result=_marker): + from google.cloud.bigtable_v2.services.bigtable import BigtableClient + client = _Client() chunks, results = self._load_json_test(testcase_name) response = _ReadRowsResponseV2(chunks) iterator = _MockCancellableIterator(response) - client._data_stub = mock.MagicMock() - client._data_stub.ReadRows.side_effect = [iterator] + data_api = mock.create_autospec(BigtableClient) + client._table_data_client = data_api + client._table_data_client.read_rows.side_effect = [iterator] request = object() - prd = 
self._make_one(client._data_stub.ReadRows, request) + prd = self._make_one(client._table_data_client.read_rows, request) prd.consume_all() flattened = self._sort_flattend_cells(_flatten_cells(prd)) if expected_result is self._marker: @@ -1216,6 +1246,7 @@ class _MockCancellableIterator(object): def __init__(self, *values): self.iter_values = iter(values) + self.last_scanned_row_key = "" def cancel(self): self.cancel_calls += 1 @@ -1239,6 +1270,7 @@ class _PartialCellData(object): family_name = u"" qualifier = None timestamp_micros = 0 + last_scanned_row_key = "" def __init__(self, **kw): self.labels = kw.pop("labels", []) @@ -1253,13 +1285,14 @@ def __init__(self, chunks, last_scanned_row_key=""): def _generate_cell_chunks(chunk_text_pbs): from google.protobuf.text_format import Merge - from google.cloud.bigtable_v2.proto.bigtable_pb2 import ReadRowsResponse + from google.cloud.bigtable_v2.types.bigtable import ReadRowsResponse chunks = [] for chunk_text_pb in chunk_text_pbs: chunk = ReadRowsResponse.CellChunk() - chunks.append(Merge(chunk_text_pb, chunk)) + chunk._pb = Merge(chunk_text_pb, chunk._pb) + chunks.append(chunk) return chunks @@ -1284,16 +1317,16 @@ def _parse_readrows_acceptance_tests(filename): def _ReadRowsResponseCellChunkPB(*args, **kw): - from google.cloud.bigtable_v2.proto import bigtable_pb2 as messages_v2_pb2 + from google.cloud.bigtable_v2.types import bigtable as messages_v2_pb2 family_name = kw.pop("family_name", None) qualifier = kw.pop("qualifier", None) message = messages_v2_pb2.ReadRowsResponse.CellChunk(*args, **kw) if family_name: - message.family_name.value = family_name + message.family_name = family_name if qualifier: - message.qualifier.value = qualifier + message.qualifier = qualifier return message @@ -1305,7 +1338,7 @@ def _make_cell(value): def _ReadRowsRequestPB(*args, **kw): - from google.cloud.bigtable_v2.proto import bigtable_pb2 as messages_v2_pb2 + from google.cloud.bigtable_v2.types import bigtable as messages_v2_pb2 return messages_v2_pb2.ReadRowsRequest(*args, **kw) diff --git a/tests/unit/test_row_filters.py b/tests/unit/test_row_filters.py index 02a912318..c42345ee0 100644 --- a/tests/unit/test_row_filters.py +++ b/tests/unit/test_row_filters.py @@ -1057,42 +1057,42 @@ def test_to_pb_false_only(self): def _ColumnRangePB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 + from google.cloud.bigtable_v2.types import data as data_v2_pb2 return data_v2_pb2.ColumnRange(*args, **kw) def _RowFilterPB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 + from google.cloud.bigtable_v2.types import data as data_v2_pb2 return data_v2_pb2.RowFilter(*args, **kw) def _RowFilterChainPB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 + from google.cloud.bigtable_v2.types import data as data_v2_pb2 return data_v2_pb2.RowFilter.Chain(*args, **kw) def _RowFilterConditionPB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 + from google.cloud.bigtable_v2.types import data as data_v2_pb2 return data_v2_pb2.RowFilter.Condition(*args, **kw) def _RowFilterInterleavePB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 + from google.cloud.bigtable_v2.types import data as data_v2_pb2 return data_v2_pb2.RowFilter.Interleave(*args, **kw) def _TimestampRangePB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 + from google.cloud.bigtable_v2.types import data as data_v2_pb2 
return data_v2_pb2.TimestampRange(*args, **kw) def _ValueRangePB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 + from google.cloud.bigtable_v2.types import data as data_v2_pb2 return data_v2_pb2.ValueRange(*args, **kw) diff --git a/tests/unit/test_row_set.py b/tests/unit/test_row_set.py index a855099a1..c1fa4ca87 100644 --- a/tests/unit/test_row_set.py +++ b/tests/unit/test_row_set.py @@ -185,7 +185,7 @@ def test__update_message_request(self): expected_request = _ReadRowsRequestPB(table_name=table_name) expected_request.rows.row_keys.append(_to_bytes("row_key1")) - expected_request.rows.row_ranges.add(**row_range1.get_range_kwargs()) + expected_request.rows.row_ranges.append(row_range1.get_range_kwargs()) self.assertEqual(request, expected_request) @@ -270,6 +270,6 @@ def test_get_range_kwargs_open_closed(self): def _ReadRowsRequestPB(*args, **kw): - from google.cloud.bigtable_v2.proto import bigtable_pb2 as messages_v2_pb2 + from google.cloud.bigtable_v2.types import bigtable as messages_v2_pb2 return messages_v2_pb2.ReadRowsRequest(*args, **kw) diff --git a/tests/unit/test_table.py b/tests/unit/test_table.py index 4469846b1..c52119192 100644 --- a/tests/unit/test_table.py +++ b/tests/unit/test_table.py @@ -47,7 +47,8 @@ def test_w_too_many_mutations(self): def test_normal(self): from google.cloud.bigtable.row import DirectRow - from google.cloud.bigtable_v2.proto import bigtable_pb2 + from google.cloud.bigtable_v2.types import MutateRowsRequest + from google.cloud.bigtable_v2.types import data table = mock.Mock(spec=["name"]) table.name = "table" @@ -60,22 +61,23 @@ def test_normal(self): result = self._call_fut("table", rows) - Entry = bigtable_pb2.MutateRowsRequest.Entry - - entry_1 = Entry(row_key=b"row_key") - mutations_1 = entry_1.mutations.add() + entry_1 = MutateRowsRequest.Entry() + entry_1.row_key = b"row_key" + mutations_1 = data.Mutation() mutations_1.set_cell.family_name = "cf1" mutations_1.set_cell.column_qualifier = b"c1" mutations_1.set_cell.timestamp_micros = -1 mutations_1.set_cell.value = b"1" + entry_1.mutations.append(mutations_1) - entry_2 = Entry(row_key=b"row_key_2") - mutations_2 = entry_2.mutations.add() + entry_2 = MutateRowsRequest.Entry() + entry_2.row_key = b"row_key_2" + mutations_2 = data.Mutation() mutations_2.set_cell.family_name = "cf1" mutations_2.set_cell.column_qualifier = b"c1" mutations_2.set_cell.timestamp_micros = -1 mutations_2.set_cell.value = b"2" - + entry_2.mutations.append(mutations_2) self.assertEqual(result, [entry_1, entry_2]) @@ -141,7 +143,7 @@ class TestTable(unittest.TestCase): ROW_KEY_1 = b"row-key-1" ROW_KEY_2 = b"row-key-2" ROW_KEY_3 = b"row-key-3" - FAMILY_NAME = u"family" + FAMILY_NAME = "family" QUALIFIER = b"qualifier" TIMESTAMP_MICROS = 100 VALUE = b"value" @@ -322,16 +324,16 @@ def test___ne__(self): self.assertNotEqual(table1, table2) def _create_test_helper(self, split_keys=[], column_families={}): - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - from google.cloud.bigtable_admin_v2.proto import table_pb2 - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_table_admin_pb2 as table_admin_messages_v2_pb2, + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, + ) + from google.cloud.bigtable_admin_v2.types import table as table_pb2 + from google.cloud.bigtable_admin_v2.types import ( + bigtable_table_admin as table_admin_messages_v2_pb2, ) from google.cloud.bigtable.column_family import 
ColumnFamily - table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient - ) + table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True @@ -354,10 +356,12 @@ def _create_test_helper(self, split_keys=[], column_families={}): splits = [split(key=split_key) for split_key in split_keys] table_api.create_table.assert_called_once_with( - parent=self.INSTANCE_NAME, - table=table_pb2.Table(column_families=families), - table_id=self.TABLE_ID, - initial_splits=splits, + request={ + "parent": self.INSTANCE_NAME, + "table": table_pb2.Table(column_families=families), + "table_id": self.TABLE_ID, + "initial_splits": splits, + } ) def test_create(self): @@ -373,35 +377,44 @@ def test_create_with_split_keys(self): self._create_test_helper(split_keys=[b"split1", b"split2", b"split3"]) def test_exists(self): - from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_data_v2_pb2 - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_table_admin_pb2 as table_messages_v1_pb2, + from google.cloud.bigtable_admin_v2.types import ListTablesResponse + from google.cloud.bigtable_admin_v2.types import Table + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as table_admin_client, ) - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_instance_admin_client, - bigtable_table_admin_client, + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + client as instance_admin_client, ) from google.api_core.exceptions import NotFound from google.api_core.exceptions import BadRequest - table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) - instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient( - mock.Mock() + table_api = mock.create_autospec(table_admin_client.BigtableTableAdminClient) + instance_api = mock.create_autospec( + instance_admin_client.BigtableInstanceAdminClient ) + credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True ) instance = client.instance(instance_id=self.INSTANCE_ID) # Create response_pb - response_pb = table_messages_v1_pb2.ListTablesResponse( - tables=[table_data_v2_pb2.Table(name=self.TABLE_NAME)] - ) + response_pb = ListTablesResponse(tables=[Table(name=self.TABLE_NAME)]) # Patch API calls client._table_admin_client = table_api client._instance_admin_client = instance_api - bigtable_table_stub = client._table_admin_client.transport + bigtable_table_stub = client._table_admin_client + + bigtable_table_stub.get_table.side_effect = [ + response_pb, + NotFound("testing"), + BadRequest("testing"), + ] + + client._table_admin_client = table_api + client._instance_admin_client = instance_api + bigtable_table_stub = client._table_admin_client bigtable_table_stub.get_table.side_effect = [ response_pb, NotFound("testing"), @@ -422,11 +435,11 @@ def test_exists(self): table2.exists() def test_delete(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - - table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, ) + + table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) credentials = _make_credentials() client = self._make_client( 
project="project-id", credentials=credentials, admin=True @@ -445,9 +458,11 @@ def test_delete(self): self.assertEqual(result, expected_result) def _list_column_families_helper(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, + ) - table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) + table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True @@ -462,7 +477,7 @@ def _list_column_families_helper(self): # Patch the stub used by the API method. client._table_admin_client = table_api - bigtable_table_stub = client._table_admin_client.transport + bigtable_table_stub = client._table_admin_client bigtable_table_stub.get_table.side_effect = [response_pb] # Create expected_result. @@ -476,7 +491,9 @@ def test_list_column_families(self): self._list_column_families_helper() def test_get_cluster_states(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, + ) from google.cloud.bigtable.enums import Table as enum_table from google.cloud.bigtable.table import ClusterState @@ -484,7 +501,7 @@ def test_get_cluster_states(self): PLANNED_MAINTENANCE = enum_table.ReplicationState.PLANNED_MAINTENANCE READY = enum_table.ReplicationState.READY - table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) + table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True @@ -502,14 +519,15 @@ def test_get_cluster_states(self): # Patch the stub used by the API method. client._table_admin_client = table_api - bigtable_table_stub = client._table_admin_client.transport + bigtable_table_stub = client._table_admin_client + bigtable_table_stub.get_table.side_effect = [response_pb] # build expected result expected_result = { - u"cluster-id1": ClusterState(INITIALIZING), - u"cluster-id2": ClusterState(PLANNED_MAINTENANCE), - u"cluster-id3": ClusterState(READY), + "cluster-id1": ClusterState(INITIALIZING), + "cluster-id2": ClusterState(PLANNED_MAINTENANCE), + "cluster-id3": ClusterState(READY), } # Perform the method and check the result. 
@@ -521,14 +539,14 @@ def _read_row_helper(self, chunks, expected_result, app_profile_id=None): from google.cloud._testing import _Monkey from google.cloud.bigtable import table as MUT from google.cloud.bigtable.row_set import RowSet - from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.cloud.bigtable_v2.services.bigtable import BigtableClient + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, + ) from google.cloud.bigtable.row_filters import RowSampleFilter - data_api = bigtable_client.BigtableClient(mock.Mock()) - table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient - ) + data_api = mock.create_autospec(BigtableClient) + table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True @@ -554,10 +572,8 @@ def mock_create_row_request(table_name, **kwargs): # Patch the stub used by the API method. client._table_data_client = data_api client._table_admin_client = table_api - client._table_data_client.transport.read_rows = mock.Mock( - side_effect=[response_iterator] - ) - + client._table_data_client.read_rows.side_effect = [response_iterator] + table._instance._client._table_data_client = client._table_data_client # Perform the method and check the result. filter_obj = RowSampleFilter(0.33) result = None @@ -618,7 +634,7 @@ def test_read_row_more_than_one_row_returned(self): timestamp_micros=self.TIMESTAMP_MICROS, value=self.VALUE, commit_row=True, - ) + )._pb chunk_2 = _ReadRowsResponseCellChunkPB( row_key=self.ROW_KEY_2, family_name=self.FAMILY_NAME, @@ -626,7 +642,7 @@ def test_read_row_more_than_one_row_returned(self): timestamp_micros=self.TIMESTAMP_MICROS, value=self.VALUE, commit_row=True, - ) + )._pb chunks = [chunk_1, chunk_2] with self.assertRaises(ValueError): @@ -650,11 +666,11 @@ def _mutate_rows_helper( ): from google.rpc.status_pb2 import Status from google.cloud.bigtable.table import DEFAULT_RETRY - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - - table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, ) + + table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True @@ -737,14 +753,14 @@ def test_read_rows(self): from google.cloud._testing import _Monkey from google.cloud.bigtable.row_data import PartialRowsData from google.cloud.bigtable import table as MUT - from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.cloud.bigtable_v2.services.bigtable import BigtableClient + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, + ) from google.cloud.bigtable.row_data import DEFAULT_RETRY_READ_ROWS - data_api = bigtable_client.BigtableClient(mock.Mock()) - table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient - ) + data_api = mock.create_autospec(BigtableClient) + table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) credentials = 
_make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True @@ -797,12 +813,14 @@ def mock_create_row_request(table_name, **kwargs): self.assertEqual(mock_created, [(table.name, created_kwargs)]) def test_read_retry_rows(self): - from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.cloud.bigtable_v2.services.bigtable import BigtableClient + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, + ) from google.api_core import retry - data_api = bigtable_client.BigtableClient(mock.Mock()) - table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) + data_api = mock.create_autospec(BigtableClient) + table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True @@ -840,7 +858,9 @@ def test_read_retry_rows(self): response_iterator = _MockReadRowsIterator(response_2) # Patch the stub used by the API method. - client._table_data_client.transport.read_rows = mock.Mock( + data_api.table_path.return_value = f"projects/{self.PROJECT_ID}/instances/{self.INSTANCE_ID}/tables/{self.TABLE_ID}" + + client._table_data_client.read_rows = mock.Mock( side_effect=[ response_failure_iterator_1, response_failure_iterator_2, @@ -848,6 +868,8 @@ def test_read_retry_rows(self): ] ) + table._instance._client._table_data_client = data_api + table._instance._client._table_admin_client = table_api rows = [] for row in table.read_rows( start_key=self.ROW_KEY_1, end_key=self.ROW_KEY_2, retry=retry_read_rows @@ -858,12 +880,14 @@ def test_read_retry_rows(self): self.assertEqual(result.row_key, self.ROW_KEY_2) def test_yield_retry_rows(self): - from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.cloud.bigtable_v2.services.bigtable import BigtableClient + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, + ) import warnings - data_api = bigtable_client.BigtableClient(mock.Mock()) - table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) + data_api = mock.create_autospec(BigtableClient) + table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True @@ -899,13 +923,16 @@ def test_yield_retry_rows(self): response_iterator = _MockReadRowsIterator(response_2) # Patch the stub used by the API method. 
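The retry tests lean on one detail of `side_effect`: each element is consumed by exactly one call, so a retried `read_rows` can be scripted as one canned stream per attempt. A sketch, with the three iterators standing in for the failure and success helpers built earlier in the test:

    import mock
    from google.cloud.bigtable_v2.services.bigtable import BigtableClient

    data_api = mock.create_autospec(BigtableClient)
    # Two retryable failures, then the stream that finally succeeds.
    data_api.read_rows.side_effect = [
        response_failure_iterator_1,
        response_failure_iterator_2,
        response_iterator,
    ]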
- client._table_data_client.transport.read_rows = mock.Mock( - side_effect=[ - response_failure_iterator_1, - response_failure_iterator_2, - response_iterator, - ] - ) + data_api.table_path.return_value = f"projects/{self.PROJECT_ID}/instances/{self.INSTANCE_ID}/tables/{self.TABLE_ID}" + table_api.table_path.return_value = f"projects/{self.PROJECT_ID}/instances/{self.INSTANCE_ID}/tables/{self.TABLE_ID}" + + table._instance._client._table_data_client = data_api + table._instance._client._table_admin_client = table_api + client._table_data_client.read_rows.side_effect = [ + response_failure_iterator_1, + response_failure_iterator_2, + response_iterator, + ] rows = [] with warnings.catch_warnings(record=True) as warned: @@ -921,14 +948,16 @@ def test_yield_retry_rows(self): self.assertEqual(result.row_key, self.ROW_KEY_2) def test_yield_rows_with_row_set(self): - from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.cloud.bigtable_v2.services.bigtable import BigtableClient + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, + ) from google.cloud.bigtable.row_set import RowSet from google.cloud.bigtable.row_set import RowRange import warnings - data_api = bigtable_client.BigtableClient(mock.Mock()) - table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) + data_api = mock.create_autospec(BigtableClient) + table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True @@ -972,9 +1001,12 @@ def test_yield_rows_with_row_set(self): response_iterator = _MockReadRowsIterator(response_1, response_2, response_3) # Patch the stub used by the API method. - client._table_data_client.transport.read_rows = mock.Mock( - side_effect=[response_iterator] - ) + data_api.table_path.return_value = f"projects/{self.PROJECT_ID}/instances/{self.INSTANCE_ID}/tables/{self.TABLE_ID}" + table_api.table_path.return_value = f"projects/{self.PROJECT_ID}/instances/{self.INSTANCE_ID}/tables/{self.TABLE_ID}" + + table._instance._client._table_data_client = data_api + table._instance._client._table_admin_client = table_api + client._table_data_client.read_rows.side_effect = [response_iterator] rows = [] row_set = RowSet() @@ -995,11 +1027,13 @@ def test_yield_rows_with_row_set(self): self.assertEqual(rows[2].row_key, self.ROW_KEY_3) def test_sample_row_keys(self): - from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.cloud.bigtable_v2.services.bigtable import BigtableClient + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, + ) - data_api = bigtable_client.BigtableClient(mock.Mock()) - table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) + data_api = mock.create_autospec(BigtableClient) + table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True @@ -1013,10 +1047,7 @@ def test_sample_row_keys(self): response_iterator = object() # Just passed to a mock. # Patch the stub used by the API method. 
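The `table_path.return_value` lines above address a side effect of autospeccing: the handwritten layer now asks the client for resource names through its generated path helpers, and an autospecced mock returns another mock from `table_path` unless the test pins it. A sketch (the resource name shown is illustrative):

    import mock
    from google.cloud.bigtable_v2.services.bigtable import BigtableClient

    data_api = mock.create_autospec(BigtableClient)
    data_api.table_path.return_value = (
        "projects/project-id/instances/instance-id/tables/table-id"
    )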
- inner_api_calls = client._table_data_client._inner_api_calls - inner_api_calls["sample_row_keys"] = mock.Mock( - side_effect=[[response_iterator]] - ) + client._table_data_client.sample_row_keys.side_effect = [[response_iterator]] # Create expected_result. expected_result = response_iterator @@ -1026,13 +1057,13 @@ def test_sample_row_keys(self): self.assertEqual(result[0], expected_result) def test_truncate(self): - from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - - data_api = mock.create_autospec(bigtable_client.BigtableClient) - table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient + from google.cloud.bigtable_v2.services.bigtable import BigtableClient + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, ) + + data_api = mock.create_autospec(BigtableClient) + table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True @@ -1047,19 +1078,19 @@ def test_truncate(self): result = table.truncate() table_api.drop_row_range.assert_called_once_with( - name=self.TABLE_NAME, delete_all_data_from_table=True + request={"name": self.TABLE_NAME, "delete_all_data_from_table": True} ) self.assertEqual(result, expected_result) def test_truncate_w_timeout(self): - from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - - data_api = mock.create_autospec(bigtable_client.BigtableClient) - table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient + from google.cloud.bigtable_v2.services.bigtable import BigtableClient + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, ) + + data_api = mock.create_autospec(BigtableClient) + table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True @@ -1077,13 +1108,13 @@ def test_truncate_w_timeout(self): self.assertEqual(result, expected_result) def test_drop_by_prefix(self): - from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - - data_api = mock.create_autospec(bigtable_client.BigtableClient) - table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient + from google.cloud.bigtable_v2.services.bigtable import BigtableClient + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, ) + + data_api = mock.create_autospec(BigtableClient) + table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True @@ -1102,13 +1133,13 @@ def test_drop_by_prefix(self): self.assertEqual(result, expected_result) def test_drop_by_prefix_w_timeout(self): - from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - - data_api = mock.create_autospec(bigtable_client.BigtableClient) - table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient + from 
google.cloud.bigtable_v2.services.bigtable import BigtableClient + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, ) + + data_api = mock.create_autospec(BigtableClient) + table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True @@ -1140,7 +1171,9 @@ def test_mutations_batcher_factory(self): self.assertEqual(mutation_batcher.max_row_bytes, max_row_bytes) def test_get_iam_policy(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, + ) from google.iam.v1 import policy_pb2 from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE @@ -1157,15 +1190,15 @@ def test_get_iam_policy(self): bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": members}] iam_policy = policy_pb2.Policy(version=version, etag=etag, bindings=bindings) - table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient - ) + table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) client._table_admin_client = table_api table_api.get_iam_policy.return_value = iam_policy result = table.get_iam_policy() - table_api.get_iam_policy.assert_called_once_with(resource=table.name) + table_api.get_iam_policy.assert_called_once_with( + request={"resource": table.name} + ) self.assertEqual(result.version, version) self.assertEqual(result.etag, etag) admins = result.bigtable_admins @@ -1174,7 +1207,9 @@ def test_get_iam_policy(self): self.assertEqual(found, expected) def test_set_iam_policy(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, + ) from google.iam.v1 import policy_pb2 from google.cloud.bigtable.policy import Policy from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE @@ -1192,9 +1227,7 @@ def test_set_iam_policy(self): bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": sorted(members)}] iam_policy_pb = policy_pb2.Policy(version=version, etag=etag, bindings=bindings) - table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient - ) + table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) client._table_admin_client = table_api table_api.set_iam_policy.return_value = iam_policy_pb @@ -1207,7 +1240,7 @@ def test_set_iam_policy(self): result = table.set_iam_policy(iam_policy) table_api.set_iam_policy.assert_called_once_with( - resource=table.name, policy=iam_policy_pb + request={"resource": table.name, "policy": iam_policy_pb} ) self.assertEqual(result.version, version) self.assertEqual(result.etag, etag) @@ -1217,7 +1250,9 @@ def test_set_iam_policy(self): self.assertEqual(found, expected) def test_test_iam_permissions(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, + ) from google.iam.v1 import iam_policy_pb2 credentials = _make_credentials() @@ -1231,9 +1266,7 @@ def test_test_iam_permissions(self): response = iam_policy_pb2.TestIamPermissionsResponse(permissions=permissions) - table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient - ) + table_api = 
mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) table_api.test_iam_permissions.return_value = response client._table_admin_client = table_api @@ -1241,7 +1274,7 @@ def test_test_iam_permissions(self): self.assertEqual(result, permissions) table_api.test_iam_permissions.assert_called_once_with( - resource=table.name, permissions=permissions + request={"resource": table.name, "permissions": permissions} ) def test_backup_factory_defaults(self): @@ -1274,9 +1307,7 @@ def test_backup_factory_non_defaults(self): table = self._make_one(self.TABLE_ID, instance) timestamp = datetime.datetime.utcnow().replace(tzinfo=UTC) backup = table.backup( - self.BACKUP_ID, - cluster_id=self.CLUSTER_ID, - expire_time=timestamp, + self.BACKUP_ID, cluster_id=self.CLUSTER_ID, expire_time=timestamp, ) self.assertIsInstance(backup, Backup) @@ -1293,18 +1324,20 @@ def test_backup_factory_non_defaults(self): self.assertIsNone(backup._state) def _list_backups_helper(self, cluster_id=None, filter_=None, **kwargs): - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_instance_admin_client, - bigtable_table_admin_client, + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + BigtableTableAdminClient, ) - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_table_admin_pb2, - table_pb2, + from google.cloud.bigtable_admin_v2.types import ( + bigtable_table_admin, + Backup as backup_pb, ) from google.cloud.bigtable.backup import Backup - instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient - table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) + instance_api = mock.create_autospec(BigtableInstanceAdminClient) + table_api = mock.create_autospec(BigtableTableAdminClient) client = self._make_client( project=self.PROJECT_ID, credentials=_make_credentials(), admin=True ) @@ -1313,19 +1346,20 @@ def _list_backups_helper(self, cluster_id=None, filter_=None, **kwargs): client._instance_admin_client = instance_api client._table_admin_client = table_api + table._instance._client._instance_admin_client = instance_api + table._instance._client._table_admin_client = table_api parent = self.INSTANCE_NAME + "/clusters/cluster" - backups_pb = bigtable_table_admin_pb2.ListBackupsResponse( + backups_pb = bigtable_table_admin.ListBackupsResponse( backups=[ - table_pb2.Backup(name=parent + "/backups/op1"), - table_pb2.Backup(name=parent + "/backups/op2"), - table_pb2.Backup(name=parent + "/backups/op3"), + backup_pb(name=parent + "/backups/op1"), + backup_pb(name=parent + "/backups/op2"), + backup_pb(name=parent + "/backups/op3"), ] ) - api = table_api._inner_api_calls["list_backups"] = mock.Mock( - return_value=backups_pb - ) + table_api.list_backups.return_value = backups_pb + api = table._instance._client._table_admin_client.list_backups backups_filter = "source_table:{}".format(self.TABLE_NAME) if filter_: @@ -1340,16 +1374,21 @@ def _list_backups_helper(self, cluster_id=None, filter_=None, **kwargs): cluster_id = "-" parent = "{}/clusters/{}".format(self.INSTANCE_NAME, cluster_id) - expected_metadata = [ - ("x-goog-request-params", "parent={}".format(parent)), - ] + order_by = None + page_size = 0 + if "order_by" in kwargs: + order_by = kwargs["order_by"] + + if "page_size" in kwargs: + page_size = kwargs["page_size"] + api.assert_called_once_with( - bigtable_table_admin_pb2.ListBackupsRequest( - parent=parent, 
@@ -1340,16 +1374,21 @@ def _list_backups_helper(self, cluster_id=None, filter_=None, **kwargs):
             cluster_id = "-"
         parent = "{}/clusters/{}".format(self.INSTANCE_NAME, cluster_id)

-        expected_metadata = [
-            ("x-goog-request-params", "parent={}".format(parent)),
-        ]
+        order_by = None
+        page_size = 0
+        if "order_by" in kwargs:
+            order_by = kwargs["order_by"]
+
+        if "page_size" in kwargs:
+            page_size = kwargs["page_size"]
+
         api.assert_called_once_with(
-            bigtable_table_admin_pb2.ListBackupsRequest(
-                parent=parent, filter=backups_filter, **kwargs
-            ),
-            retry=mock.ANY,
-            timeout=mock.ANY,
-            metadata=expected_metadata,
+            request={
+                "parent": parent,
+                "filter": backups_filter,
+                "order_by": order_by,
+                "page_size": page_size,
+            }
         )

     def test_list_backups_defaults(self):
@@ -1362,20 +1401,23 @@ def test_list_backups_w_options(self):

     def _restore_helper(self, backup_name=None):
         from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient
-        from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client
         from google.cloud.bigtable.instance import Instance

         op_future = object()
-        instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient
+        credentials = _make_credentials()
+        client = self._make_client(
+            project=self.PROJECT_ID, credentials=credentials, admin=True
+        )

-        client = mock.Mock(project=self.PROJECT_ID, instance_admin_client=instance_api)
         instance = Instance(self.INSTANCE_ID, client=client)
         table = self._make_one(self.TABLE_ID, instance)

-        api = client.table_admin_client = mock.create_autospec(
-            BigtableTableAdminClient, instance=True
+        api = client._table_admin_client = mock.create_autospec(
+            BigtableTableAdminClient
         )
+        api.restore_table.return_value = op_future
+        table._instance._client._table_admin_client = api

         if backup_name:
             future = table.restore(self.TABLE_ID, backup_name=self.BACKUP_NAME)
@@ -1384,9 +1426,11 @@ def _restore_helper(self, backup_name=None):
         self.assertIs(future, op_future)

         api.restore_table.assert_called_once_with(
-            parent=self.INSTANCE_NAME,
-            table_id=self.TABLE_ID,
-            backup=self.BACKUP_NAME,
+            request={
+                "parent": self.INSTANCE_NAME,
+                "table_id": self.TABLE_ID,
+                "backup": self.BACKUP_NAME,
+            }
         )

     def test_restore_table_w_backup_id(self):
@@ -1445,7 +1489,7 @@ def _make_responses_statuses(self, codes):

     def _make_responses(self, codes):
         import six
-        from google.cloud.bigtable_v2.proto.bigtable_pb2 import MutateRowsResponse
+        from google.cloud.bigtable_v2.types.bigtable import MutateRowsResponse
         from google.rpc.status_pb2 import Status

         entries = [
@@ -1455,13 +1499,13 @@ def _make_responses(self, codes):
         return MutateRowsResponse(entries=entries)

     def test_callable_empty_rows(self):
-        from google.cloud.bigtable_v2.gapic import bigtable_client
-        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
-
-        data_api = mock.create_autospec(bigtable_client.BigtableClient)
-        table_api = mock.create_autospec(
-            bigtable_table_admin_client.BigtableTableAdminClient
+        from google.cloud.bigtable_v2.services.bigtable import BigtableClient
+        from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
+            client as bigtable_table_admin,
         )
+
+        data_api = mock.create_autospec(BigtableClient)
+        table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient)
         credentials = _make_credentials()
         client = self._make_client(
             project="project-id", credentials=credentials, admin=True
@@ -1478,8 +1522,10 @@ def test_callable_empty_rows(self):

     def test_callable_no_retry_strategy(self):
         from google.cloud.bigtable.row import DirectRow
-        from google.cloud.bigtable_v2.gapic import bigtable_client
-        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
+        from google.cloud.bigtable_v2.services.bigtable import BigtableClient
+        from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
+            client as bigtable_table_admin,
+        )

         # Setup:
         #   - Mutate 3 rows.
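`_make_responses` above now builds its fixtures from `bigtable_v2.types`; a standalone sketch of the same construction (status codes chosen arbitrarily):

    from google.cloud.bigtable_v2.types.bigtable import MutateRowsResponse
    from google.rpc.status_pb2 import Status

    codes = [0, 4]  # OK, DEADLINE_EXCEEDED
    entries = [
        MutateRowsResponse.Entry(index=i, status=Status(code=code))
        for i, code in enumerate(codes)
    ]
    response = MutateRowsResponse(entries=entries)
    assert [entry.status.code for entry in response.entries] == codes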
@@ -1491,8 +1537,9 @@ def test_callable_no_retry_strategy(self):
         #   - State of responses_statuses should be
         #       [success, retryable, non-retryable]

-        data_api = bigtable_client.BigtableClient(mock.Mock())
-        table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
+        data_api = mock.create_autospec(BigtableClient)
+        table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient)
+
         credentials = _make_credentials()
         client = self._make_client(
             project="project-id", credentials=credentials, admin=True
@@ -1509,12 +1556,16 @@ def test_callable_no_retry_strategy(self):
         row_3 = DirectRow(row_key=b"row_key_3", table=table)
         row_3.set_cell("cf", b"col", b"value3")

-        worker = self._make_worker(client, table.name, [row_1, row_2, row_3])
-
         response_codes = [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE]
         response = self._make_responses(response_codes)
         data_api.mutate_rows = mock.MagicMock(return_value=[response])
+        table._instance._client._table_data_client = data_api
+        table._instance._client._table_admin_client = table_api
+
+        table._instance._client._table_data_client.mutate_rows.return_value = [response]
+
+        worker = self._make_worker(client, table.name, [row_1, row_2, row_3])

         statuses = worker(retry=None)
         result = [status.code for status in statuses]
@@ -1525,8 +1576,10 @@ def test_callable_retry(self):
         from google.cloud.bigtable.row import DirectRow
         from google.cloud.bigtable.table import DEFAULT_RETRY
-        from google.cloud.bigtable_v2.gapic import bigtable_client
-        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
+        from google.cloud.bigtable_v2.services.bigtable import BigtableClient
+        from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
+            client as bigtable_table_admin,
+        )

         # Setup:
         #   - Mutate 3 rows.
@@ -1539,8 +1592,9 @@ def test_callable_retry(self):
         #   - State of responses_statuses should be
         #       [success, success, non-retryable]

-        data_api = bigtable_client.BigtableClient(mock.Mock())
-        table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
+        data_api = mock.create_autospec(BigtableClient)
+        table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient)
+
         credentials = _make_credentials()
         client = self._make_client(
             project="project-id", credentials=credentials, admin=True
@@ -1549,7 +1603,6 @@ def test_callable_retry(self):
         client._table_admin_client = table_api
         instance = client.instance(instance_id=self.INSTANCE_ID)
         table = self._make_table(self.TABLE_ID, instance)
-
         row_1 = DirectRow(row_key=b"row_key", table=table)
         row_1.set_cell("cf", b"col", b"value1")
         row_2 = DirectRow(row_key=b"row_key_2", table=table)
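The hunks that follow swap the old `_inner_api_calls["mutate_rows"]` patching for stubbing the autospec'd client method directly. A sketch of that pattern, assuming nothing beyond `mock` and the generated client (the stubbed return values are placeholders):

    from unittest import mock

    from google.cloud.bigtable_v2.services.bigtable import BigtableClient

    data_api = mock.create_autospec(BigtableClient)
    # Each call pops the next item, mimicking one response stream per attempt.
    data_api.mutate_rows.side_effect = [["first-attempt"], ["second-attempt"]]

    assert data_api.mutate_rows(request={}) == ["first-attempt"]
    assert data_api.mutate_rows(request={}) == ["second-attempt"]
    assert data_api.mutate_rows.call_count == 2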
@@ -1563,9 +1616,9 @@ def test_callable_retry(self):
         response_2 = self._make_responses([self.SUCCESS])

         # Patch the stub used by the API method.
-        client._table_data_client._inner_api_calls["mutate_rows"] = mock.Mock(
-            side_effect=[[response_1], [response_2]]
-        )
+        client._table_data_client.mutate_rows.side_effect = [[response_1], [response_2]]
+        table._instance._client._table_data_client = data_api
+        table._instance._client._table_admin_client = table_api

         retry = DEFAULT_RETRY.with_delay(initial=0.1)
         worker = self._make_worker(client, table.name, [row_1, row_2, row_3])
@@ -1574,17 +1627,15 @@ def test_callable_retry(self):
         result = [status.code for status in statuses]

         expected_result = [self.SUCCESS, self.SUCCESS, self.NON_RETRYABLE]
-        self.assertEqual(
-            client._table_data_client._inner_api_calls["mutate_rows"].call_count, 2
-        )
+        self.assertEqual(client._table_data_client.mutate_rows.call_count, 2)
         self.assertEqual(result, expected_result)

     def test_do_mutate_retryable_rows_empty_rows(self):
-        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
-
-        table_api = mock.create_autospec(
-            bigtable_table_admin_client.BigtableTableAdminClient
+        from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
+            client as bigtable_table_admin,
         )
+
+        table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient)
         credentials = _make_credentials()
         client = self._make_client(
             project="project-id", credentials=credentials, admin=True
@@ -1600,8 +1651,10 @@ def test_do_mutate_retryable_rows_empty_rows(self):

     def test_do_mutate_retryable_rows(self):
         from google.cloud.bigtable.row import DirectRow
-        from google.cloud.bigtable_v2.gapic import bigtable_client
-        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
+        from google.cloud.bigtable_v2.services.bigtable import BigtableClient
+        from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
+            client as bigtable_table_admin,
+        )

         # Setup:
         #   - Mutate 2 rows.
@@ -1610,8 +1663,9 @@ def test_do_mutate_retryable_rows(self):
         # Expectation:
         #   - Expect [success, non-retryable]

-        data_api = bigtable_client.BigtableClient(mock.Mock())
-        table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
+        data_api = mock.create_autospec(BigtableClient)
+        table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient)
+
         credentials = _make_credentials()
         client = self._make_client(
             project="project-id", credentials=credentials, admin=True
@@ -1629,8 +1683,9 @@ def test_do_mutate_retryable_rows(self):
         response = self._make_responses([self.SUCCESS, self.NON_RETRYABLE])

         # Patch the stub used by the API method.
-        inner_api_calls = client._table_data_client._inner_api_calls
-        inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]])
+        client._table_data_client.mutate_rows.side_effect = [[response]]
+        table._instance._client._table_data_client = data_api
+        table._instance._client._table_admin_client = table_api

         worker = self._make_worker(client, table.name, [row_1, row_2])
         statuses = worker._do_mutate_retryable_rows()
@@ -1643,8 +1698,10 @@ def test_do_mutate_retryable_rows(self):

     def test_do_mutate_retryable_rows_retry(self):
         from google.cloud.bigtable.row import DirectRow
         from google.cloud.bigtable.table import _BigtableRetryableError
-        from google.cloud.bigtable_v2.gapic import bigtable_client
-        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
+        from google.cloud.bigtable_v2.services.bigtable import BigtableClient
+        from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
+            client as bigtable_table_admin,
+        )

         # Setup:
         #   - Mutate 3 rows.
@@ -1655,8 +1712,8 @@ def test_do_mutate_retryable_rows_retry(self):
         #   - State of responses_statuses should be
         #       [success, retryable, non-retryable]

-        data_api = bigtable_client.BigtableClient(mock.Mock())
-        table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
+        data_api = mock.create_autospec(BigtableClient)
+        table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient)
         credentials = _make_credentials()
         client = self._make_client(
             project="project-id", credentials=credentials, admin=True
@@ -1678,8 +1735,10 @@ def test_do_mutate_retryable_rows_retry(self):
         )

         # Patch the stub used by the API method.
-        inner_api_calls = client._table_data_client._inner_api_calls
-        inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]])
+        client._table_data_client.mutate_rows.side_effect = [[response]]
+
+        table._instance._client._table_data_client = data_api
+        table._instance._client._table_admin_client = table_api

         worker = self._make_worker(client, table.name, [row_1, row_2, row_3])

@@ -1695,8 +1754,10 @@ def test_do_mutate_retryable_rows_retry(self):

     def test_do_mutate_retryable_rows_second_retry(self):
         from google.cloud.bigtable.row import DirectRow
         from google.cloud.bigtable.table import _BigtableRetryableError
-        from google.cloud.bigtable_v2.gapic import bigtable_client
-        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
+        from google.cloud.bigtable_v2.services.bigtable import BigtableClient
+        from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
+            client as bigtable_table_admin,
+        )

         # Setup:
         #   - Mutate 4 rows.
@@ -1712,8 +1773,8 @@ def test_do_mutate_retryable_rows_second_retry(self):
         #   - Exception contains response whose index should be '3' even though
         #     only two rows were retried.

-        data_api = bigtable_client.BigtableClient(mock.Mock())
-        table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
+        data_api = mock.create_autospec(BigtableClient)
+        table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient)
         credentials = _make_credentials()
         client = self._make_client(
             project="project-id", credentials=credentials, admin=True
@@ -1735,8 +1796,10 @@ def test_do_mutate_retryable_rows_second_retry(self):
         response = self._make_responses([self.SUCCESS, self.RETRYABLE_1])

         # Patch the stub used by the API method.
-        inner_api_calls = client._table_data_client._inner_api_calls
-        inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]])
+        client._table_data_client.mutate_rows.side_effect = [[response]]
+
+        table._instance._client._table_data_client = data_api
+        table._instance._client._table_admin_client = table_api

         worker = self._make_worker(client, table.name, [row_1, row_2, row_3, row_4])
         worker.responses_statuses = self._make_responses_statuses(
@@ -1759,8 +1822,10 @@ def test_do_mutate_retryable_rows_second_retry(self):

     def test_do_mutate_retryable_rows_second_try(self):
         from google.cloud.bigtable.row import DirectRow
-        from google.cloud.bigtable_v2.gapic import bigtable_client
-        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
+        from google.cloud.bigtable_v2.services.bigtable import BigtableClient
+        from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
+            client as bigtable_table_admin,
+        )

         # Setup:
         #   - Mutate 4 rows.
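These retry tests drive the worker with `DEFAULT_RETRY.with_delay(initial=0.1)` so back-off stays fast under test. The shape of that machinery, sketched with `google.api_core.retry` and a stand-in predicate (everything below is illustrative, not the library's actual worker):

    from google.api_core.retry import Retry

    attempts = []

    # Hypothetical flaky call: fails twice with a "retryable" error, then succeeds.
    @Retry(predicate=lambda exc: isinstance(exc, RuntimeError), initial=0.1, maximum=0.2)
    def flaky():
        attempts.append(1)
        if len(attempts) < 3:
            raise RuntimeError("retryable")
        return "done"

    assert flaky() == "done"
    assert len(attempts) == 3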
@@ -1772,8 +1837,8 @@ def test_do_mutate_retryable_rows_second_try(self):
         #   - After second try:
         #       [success, non-retryable, non-retryable, success]

-        data_api = bigtable_client.BigtableClient(mock.Mock())
-        table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
+        data_api = mock.create_autospec(BigtableClient)
+        table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient)
         credentials = _make_credentials()
         client = self._make_client(
             project="project-id", credentials=credentials, admin=True
@@ -1795,8 +1860,10 @@ def test_do_mutate_retryable_rows_second_try(self):
         response = self._make_responses([self.NON_RETRYABLE, self.SUCCESS])

         # Patch the stub used by the API method.
-        inner_api_calls = client._table_data_client._inner_api_calls
-        inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]])
+        client._table_data_client.mutate_rows.side_effect = [[response]]
+
+        table._instance._client._table_data_client = data_api
+        table._instance._client._table_admin_client = table_api

         worker = self._make_worker(client, table.name, [row_1, row_2, row_3, row_4])
         worker.responses_statuses = self._make_responses_statuses(
@@ -1817,7 +1884,9 @@ def test_do_mutate_retryable_rows_second_try(self):

     def test_do_mutate_retryable_rows_second_try_no_retryable(self):
         from google.cloud.bigtable.row import DirectRow
-        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
+        from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
+            client as bigtable_table_admin,
+        )

         # Setup:
         #   - Mutate 2 rows.
@@ -1827,9 +1896,7 @@ def test_do_mutate_retryable_rows_second_try_no_retryable(self):
         # Expectation:
         #   - After second try: [success, non-retryable]

-        table_api = mock.create_autospec(
-            bigtable_table_admin_client.BigtableTableAdminClient
-        )
+        table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient)
         credentials = _make_credentials()
         client = self._make_client(
             project="project-id", credentials=credentials, admin=True
@@ -1848,6 +1915,8 @@ def test_do_mutate_retryable_rows_second_try_no_retryable(self):
             [self.SUCCESS, self.NON_RETRYABLE]
         )

+        table._instance._client._table_admin_client = table_api
+
         statuses = worker._do_mutate_retryable_rows()

         result = [status.code for status in statuses]
@@ -1857,11 +1926,13 @@ def test_do_mutate_retryable_rows_mismatch_num_responses(self):
         from google.cloud.bigtable.row import DirectRow
-        from google.cloud.bigtable_v2.gapic import bigtable_client
-        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
+        from google.cloud.bigtable_v2.services.bigtable import BigtableClient
+        from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
+            client as bigtable_table_admin,
+        )

-        data_api = bigtable_client.BigtableClient(mock.Mock())
-        table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
+        data_api = mock.create_autospec(BigtableClient)
+        table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient)
         credentials = _make_credentials()
         client = self._make_client(
             project="project-id", credentials=credentials, admin=True
@@ -1879,8 +1950,10 @@ def test_do_mutate_retryable_rows_mismatch_num_responses(self):
         response = self._make_responses([self.SUCCESS])

         # Patch the stub used by the API method.
-        inner_api_calls = client._table_data_client._inner_api_calls
-        inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]])
+        client._table_data_client.mutate_rows.side_effect = [[response]]
+
+        table._instance._client._table_data_client = data_api
+        table._instance._client._table_admin_client = table_api

         worker = self._make_worker(client, table.name, [row_1, row_2])
         with self.assertRaises(RuntimeError):
@@ -1924,33 +1997,42 @@ def test_row_range_row_set_conflict(self):
         self._call_fut(None, end_key=object(), row_set=object())

     def test_row_range_start_key(self):
+        from google.cloud.bigtable_v2.types import RowRange
+
         table_name = "table_name"
         start_key = b"start_key"
         result = self._call_fut(table_name, start_key=start_key)
         expected_result = _ReadRowsRequestPB(table_name=table_name)
-        expected_result.rows.row_ranges.add(start_key_closed=start_key)
+        row_range = RowRange(start_key_closed=start_key)
+        expected_result.rows.row_ranges.append(row_range)
         self.assertEqual(result, expected_result)

     def test_row_range_end_key(self):
+        from google.cloud.bigtable_v2.types import RowRange
+
         table_name = "table_name"
         end_key = b"end_key"
         result = self._call_fut(table_name, end_key=end_key)
         expected_result = _ReadRowsRequestPB(table_name=table_name)
-        expected_result.rows.row_ranges.add(end_key_open=end_key)
+        row_range = RowRange(end_key_open=end_key)
+        expected_result.rows.row_ranges.append(row_range)
         self.assertEqual(result, expected_result)

     def test_row_range_both_keys(self):
+        from google.cloud.bigtable_v2.types import RowRange
+
         table_name = "table_name"
         start_key = b"start_key"
         end_key = b"end_key"
         result = self._call_fut(table_name, start_key=start_key, end_key=end_key)
+        row_range = RowRange(start_key_closed=start_key, end_key_open=end_key)
         expected_result = _ReadRowsRequestPB(table_name=table_name)
-        expected_result.rows.row_ranges.add(
-            start_key_closed=start_key, end_key_open=end_key
-        )
+        expected_result.rows.row_ranges.append(row_range)
         self.assertEqual(result, expected_result)

     def test_row_range_both_keys_inclusive(self):
+        from google.cloud.bigtable_v2.types import RowRange
+
         table_name = "table_name"
         start_key = b"start_key"
         end_key = b"end_key"
@@ -1958,9 +2040,8 @@ def test_row_range_both_keys_inclusive(self):
             table_name, start_key=start_key, end_key=end_key, end_inclusive=True
         )
         expected_result = _ReadRowsRequestPB(table_name=table_name)
-        expected_result.rows.row_ranges.add(
-            start_key_closed=start_key, end_key_closed=end_key
-        )
+        row_range = RowRange(start_key_closed=start_key, end_key_closed=end_key)
+        expected_result.rows.row_ranges.append(row_range)
         self.assertEqual(result, expected_result)

     def test_with_filter(self):
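The row-range hunks above capture another proto-plus difference: repeated message fields no longer expose the pb2 `.add()` method, so you construct the message and `append` it. A minimal sketch:

    from google.cloud.bigtable_v2.types import ReadRowsRequest, RowRange

    request = ReadRowsRequest(table_name="table_name")
    # pb2 style (removed): request.rows.row_ranges.add(start_key_closed=b"start")
    request.rows.row_ranges.append(RowRange(start_key_closed=b"start"))
    assert request.rows.row_ranges[0].start_key_closed == b"start"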
@@ -2002,7 +2083,7 @@ def test_with_app_profile_id(self):


 def _ReadRowsRequestPB(*args, **kw):
-    from google.cloud.bigtable_v2.proto import bigtable_pb2 as messages_v2_pb2
+    from google.cloud.bigtable_v2.types import bigtable as messages_v2_pb2

     return messages_v2_pb2.ReadRowsRequest(*args, **kw)

@@ -2094,24 +2175,24 @@ def test__repr__(self):


 def _ReadRowsResponseCellChunkPB(*args, **kw):
-    from google.cloud.bigtable_v2.proto import bigtable_pb2 as messages_v2_pb2
+    from google.cloud.bigtable_v2.types import bigtable as messages_v2_pb2

     family_name = kw.pop("family_name")
     qualifier = kw.pop("qualifier")
     message = messages_v2_pb2.ReadRowsResponse.CellChunk(*args, **kw)
-    message.family_name.value = family_name
-    message.qualifier.value = qualifier
+    message.family_name = family_name
+    message.qualifier = qualifier
     return message


 def _ReadRowsResponsePB(*args, **kw):
-    from google.cloud.bigtable_v2.proto import bigtable_pb2 as messages_v2_pb2
+    from google.cloud.bigtable_v2.types import bigtable as messages_v2_pb2

     return messages_v2_pb2.ReadRowsResponse(*args, **kw)


 def _mutate_rows_request_pb(*args, **kw):
-    from google.cloud.bigtable_v2.proto import bigtable_pb2 as data_messages_v2_pb2
+    from google.cloud.bigtable_v2.types import bigtable as data_messages_v2_pb2

     return data_messages_v2_pb2.MutateRowsRequest(*args, **kw)

@@ -2130,6 +2211,9 @@ class _MockFailureIterator_1(object):
     def next(self):
         raise DeadlineExceeded("Failed to read from server")

+    def __init__(self, last_scanned_row_key=""):
+        self.last_scanned_row_key = last_scanned_row_key
+
     __next__ = next


@@ -2137,6 +2221,7 @@ class _MockFailureIterator_2(object):
     def __init__(self, *values):
         self.iter_values = values[0]
         self.calls = 0
+        self.last_scanned_row_key = ""

     def next(self):
         self.calls += 1
@@ -2155,19 +2240,19 @@ def __init__(self, chunks, last_scanned_row_key=""):


 def _TablePB(*args, **kw):
-    from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_v2_pb2
+    from google.cloud.bigtable_admin_v2.types import table as table_v2_pb2

     return table_v2_pb2.Table(*args, **kw)


 def _ColumnFamilyPB(*args, **kw):
-    from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_v2_pb2
+    from google.cloud.bigtable_admin_v2.types import table as table_v2_pb2

     return table_v2_pb2.ColumnFamily(*args, **kw)


 def _ClusterStatePB(replication_state):
-    from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_v2_pb2
+    from google.cloud.bigtable_admin_v2.types import table as table_v2_pb2

     return table_v2_pb2.Table.ClusterState(replication_state=replication_state)
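One last proto-plus nicety shows up in `_ReadRowsResponseCellChunkPB` above: wrapper-typed fields (`StringValue`/`BytesValue`) accept plain assignment, so the old `.value` indirection disappears. Sketched under that assumption:

    from google.cloud.bigtable_v2.types import bigtable as messages_v2_pb2

    chunk = messages_v2_pb2.ReadRowsResponse.CellChunk()
    chunk.family_name = "cf"  # was: chunk.family_name.value = "cf"
    chunk.qualifier = b"col"  # was: chunk.qualifier.value = b"col"
    assert chunk.family_name == "cf"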